Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig | 37
-rw-r--r--  arch/arm/Kconfig.debug | 2
-rw-r--r--  arch/arm/Makefile | 1
-rw-r--r--  arch/arm/boot/dts/dbx5x0.dtsi | 10
-rw-r--r--  arch/arm/boot/dts/exynos5250-smdk5250.dts | 26
-rw-r--r--  arch/arm/boot/dts/exynos5250.dtsi | 44
-rw-r--r--  arch/arm/boot/dts/highbank.dts | 10
-rw-r--r--  arch/arm/boot/dts/imx23.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/prima2.dtsi | 31
-rw-r--r--  arch/arm/boot/dts/sun4i-a10.dtsi | 30
-rw-r--r--  arch/arm/boot/dts/sun5i-a13-olinuxino.dts | 2
-rw-r--r--  arch/arm/boot/dts/sun5i-a13.dtsi | 23
-rw-r--r--  arch/arm/boot/dts/vt8500.dtsi | 40
-rw-r--r--  arch/arm/boot/dts/wm8505.dtsi | 60
-rw-r--r--  arch/arm/boot/dts/wm8650.dtsi | 20
-rw-r--r--  arch/arm/boot/dts/zynq-7000.dtsi | 4
-rw-r--r--  arch/arm/common/gic.c | 25
-rw-r--r--  arch/arm/crypto/aes-armv4.S | 64
-rw-r--r--  arch/arm/crypto/sha1-armv4-large.S | 24
-rw-r--r--  arch/arm/include/asm/assembler.h | 10
-rw-r--r--  arch/arm/include/asm/cputype.h | 33
-rw-r--r--  arch/arm/include/asm/cti.h | 10
-rw-r--r--  arch/arm/include/asm/hardware/coresight.h | 6
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h | 3
-rw-r--r--  arch/arm/include/asm/idmap.h | 1
-rw-r--r--  arch/arm/include/asm/kvm_arm.h | 214
-rw-r--r--  arch/arm/include/asm/kvm_asm.h | 82
-rw-r--r--  arch/arm/include/asm/kvm_coproc.h | 47
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h | 72
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 161
-rw-r--r--  arch/arm/include/asm/kvm_mmio.h | 56
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 50
-rw-r--r--  arch/arm/include/asm/kvm_psci.h | 23
-rw-r--r--  arch/arm/include/asm/mach/pci.h | 1
-rw-r--r--  arch/arm/include/asm/memory.h | 10
-rw-r--r--  arch/arm/include/asm/opcodes-sec.h | 24
-rw-r--r--  arch/arm/include/asm/opcodes.h | 1
-rw-r--r--  arch/arm/include/asm/outercache.h | 1
-rw-r--r--  arch/arm/include/asm/pgtable-3level-hwdef.h | 5
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h | 18
-rw-r--r--  arch/arm/include/asm/pgtable.h | 7
-rw-r--r--  arch/arm/include/asm/psci.h | 36
-rw-r--r--  arch/arm/include/asm/smp_scu.h | 8
-rw-r--r--  arch/arm/include/asm/spinlock.h | 16
-rw-r--r--  arch/arm/include/asm/virt.h | 4
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h | 164
-rw-r--r--  arch/arm/kernel/Makefile | 1
-rw-r--r--  arch/arm/kernel/asm-offsets.c | 25
-rw-r--r--  arch/arm/kernel/bios32.c | 9
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 61
-rw-r--r--  arch/arm/kernel/perf_event.c | 16
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c | 51
-rw-r--r--  arch/arm/kernel/perf_event_v6.c | 4
-rw-r--r--  arch/arm/kernel/perf_event_v7.c | 18
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c | 2
-rw-r--r--  arch/arm/kernel/process.c | 13
-rw-r--r--  arch/arm/kernel/psci.c | 211
-rw-r--r--  arch/arm/kernel/sched_clock.c | 4
-rw-r--r--  arch/arm/kernel/smp.c | 31
-rw-r--r--  arch/arm/kernel/smp_scu.c | 2
-rw-r--r--  arch/arm/kernel/smp_twd.c | 53
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 6
-rw-r--r--  arch/arm/kvm/Kconfig | 56
-rw-r--r--  arch/arm/kvm/Makefile | 21
-rw-r--r--  arch/arm/kvm/arm.c | 1015
-rw-r--r--  arch/arm/kvm/coproc.c | 1046
-rw-r--r--  arch/arm/kvm/coproc.h | 153
-rw-r--r--  arch/arm/kvm/coproc_a15.c | 162
-rw-r--r--  arch/arm/kvm/emulate.c | 373
-rw-r--r--  arch/arm/kvm/guest.c | 222
-rw-r--r--  arch/arm/kvm/init.S | 114
-rw-r--r--  arch/arm/kvm/interrupts.S | 478
-rw-r--r--  arch/arm/kvm/interrupts_head.S | 441
-rw-r--r--  arch/arm/kvm/mmio.c | 153
-rw-r--r--  arch/arm/kvm/mmu.c | 787
-rw-r--r--  arch/arm/kvm/psci.c | 108
-rw-r--r--  arch/arm/kvm/reset.c | 74
-rw-r--r--  arch/arm/kvm/trace.h | 235
-rw-r--r--  arch/arm/mach-davinci/cpuidle.c | 84
-rw-r--r--  arch/arm/mach-exynos/Kconfig | 2
-rw-r--r--  arch/arm/mach-exynos/include/mach/cpufreq.h | 19
-rw-r--r--  arch/arm/mach-exynos/mach-exynos5-dt.c | 6
-rw-r--r--  arch/arm/mach-highbank/Kconfig | 4
-rw-r--r--  arch/arm/mach-highbank/core.h | 1
-rw-r--r--  arch/arm/mach-highbank/highbank.c | 6
-rw-r--r--  arch/arm/mach-highbank/sysregs.h | 4
-rw-r--r--  arch/arm/mach-omap2/board-2430sdp.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-3430sdp.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-4430sdp.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-cm-t35.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-devkit8000.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-igep0020.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-ldp.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap3beagle.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap3evm.c | 12
-rw-r--r--  arch/arm/mach-omap2/board-omap3logic.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap3pandora.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap3stalker.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap3touchbook.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap4panda.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-overo.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-rm680.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-rx51-peripherals.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-zoom-peripherals.c | 2
-rw-r--r--  arch/arm/mach-omap2/devices.c | 45
-rw-r--r--  arch/arm/mach-omap2/gpmc.c | 8
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 13
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c | 2
-rw-r--r--  arch/arm/mach-omap2/usb-musb.c | 3
-rw-r--r--  arch/arm/mach-pxa/pxa27x.c | 20
-rw-r--r--  arch/arm/mach-realview/include/mach/irqs-eb.h | 2
-rw-r--r--  arch/arm/mach-s3c24xx/Kconfig | 4
-rw-r--r--  arch/arm/mach-sa1100/assabet.c | 1
-rw-r--r--  arch/arm/mach-shmobile/board-ap4evb.c | 42
-rw-r--r--  arch/arm/mach-shmobile/board-armadillo800eva.c | 34
-rw-r--r--  arch/arm/mach-shmobile/board-kzm9g.c | 20
-rw-r--r--  arch/arm/mach-shmobile/board-mackerel.c | 42
-rw-r--r--  arch/arm/mach-sunxi/Kconfig | 1
-rw-r--r--  arch/arm/mach-tegra/cpu-tegra.c | 3
-rw-r--r--  arch/arm/mach-tegra/tegra2_emc.c | 8
-rw-r--r--  arch/arm/mach-ux500/Kconfig | 6
-rw-r--r--  arch/arm/mach-ux500/board-mop500.c | 20
-rw-r--r--  arch/arm/mach-ux500/cpu-db8500.c | 2
-rw-r--r--  arch/arm/mach-ux500/include/mach/irqs-board-mop500.h | 10
-rw-r--r--  arch/arm/mach-versatile/core.c | 15
-rw-r--r--  arch/arm/mach-versatile/pci.c | 11
-rw-r--r--  arch/arm/mm/Kconfig | 10
-rw-r--r--  arch/arm/mm/Makefile | 2
-rw-r--r--  arch/arm/mm/context.c | 3
-rw-r--r--  arch/arm/mm/dma-mapping.c | 2
-rw-r--r--  arch/arm/mm/idmap.c | 55
-rw-r--r--  arch/arm/mm/ioremap.c | 135
-rw-r--r--  arch/arm/mm/mm.h | 12
-rw-r--r--  arch/arm/mm/mmu.c | 58
-rw-r--r--  arch/arm/mm/proc-macros.S | 5
-rw-r--r--  arch/arm/mm/proc-v6.S | 2
-rw-r--r--  arch/arm/mm/proc-v7-2level.S | 2
-rw-r--r--  arch/arm/mm/proc-v7-3level.S | 2
-rw-r--r--  arch/arm/mm/vmregion.c | 205
-rw-r--r--  arch/arm/mm/vmregion.h | 31
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 15
-rw-r--r--  arch/arm/plat-omap/dmtimer.c | 8
-rw-r--r--  arch/arm/plat-samsung/adc.c | 8
-rw-r--r--  arch/arm/plat-samsung/dma-ops.c | 10
-rw-r--r--  arch/arm/plat-samsung/include/plat/adc.h | 1
-rw-r--r--  arch/arm/plat-samsung/include/plat/dma-ops.h | 3
-rw-r--r--  arch/arm/plat-samsung/s3c-dma-ops.c | 3
147 files changed, 7691 insertions, 828 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 67874b82a4ed..f410cb16dd22 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -36,7 +36,6 @@ config ARM
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
 	select HAVE_IDE if PCI || ISA || PCMCIA
-	select HAVE_IRQ_WORK
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZMA
 	select HAVE_KERNEL_LZO
@@ -1531,7 +1530,6 @@ config SMP
 
 config SMP_ON_UP
 	bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
 	depends on SMP && !XIP_KERNEL
 	default y
 	help
@@ -1620,6 +1618,16 @@ config HOTPLUG_CPU
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
 	  can be controlled through /sys/devices/system/cpu.
 
+config ARM_PSCI
+	bool "Support for the ARM Power State Coordination Interface (PSCI)"
+	depends on CPU_V7
+	help
+	  Say Y here if you want Linux to communicate with system firmware
+	  implementing the PSCI specification for CPU-centric power
+	  management operations described in ARM document number ARM DEN
+	  0022A ("Power State Coordination Interface System Software on
+	  ARM processors").
+
 config LOCAL_TIMERS
 	bool "Use local timer interrupts"
 	depends on SMP
@@ -1637,7 +1645,7 @@ config ARCH_NR_GPIO
 	default 355 if ARCH_U8500
 	default 264 if MACH_H4700
 	default 512 if SOC_OMAP5
-	default 288 if ARCH_VT8500
+	default 288 if ARCH_VT8500 || ARCH_SUNXI
 	default 0
 	help
 	  Maximum number of GPIOs in the system.
@@ -1655,6 +1663,9 @@ config HZ
 	default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
 	default 100
 
+config SCHED_HRTICK
+	def_bool HIGH_RES_TIMERS
+
 config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode"
 	depends on CPU_V7 && !CPU_V6 && !CPU_V6K
@@ -1719,7 +1730,7 @@ config AEABI
 
 config OABI_COMPAT
 	bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
-	depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL
+	depends on AEABI && !THUMB2_KERNEL
 	default y
 	help
 	  This option preserves the old syscall interface along with the
@@ -1843,7 +1854,6 @@ config SECCOMP
 
 config CC_STACKPROTECTOR
 	bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
 	help
 	  This option turns on the -fstack-protector GCC feature. This
 	  feature puts, at the beginning of functions, a canary value on
@@ -1860,7 +1870,7 @@ config XEN_DOM0
 
 config XEN
 	bool "Xen guest support on ARM (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && ARM && OF
+	depends on ARM && OF
 	depends on CPU_V7 && !CPU_V6
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
@@ -1929,7 +1939,7 @@ config ZBOOT_ROM
 
 choice
 	prompt "Include SD/MMC loader in zImage (EXPERIMENTAL)"
-	depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
+	depends on ZBOOT_ROM && ARCH_SH7372
 	default ZBOOT_ROM_NONE
 	help
 	  Include experimental SD/MMC loading code in the ROM-able zImage.
@@ -1958,7 +1968,7 @@ endchoice
 
 config ARM_APPENDED_DTB
 	bool "Use appended device tree blob to zImage (EXPERIMENTAL)"
-	depends on OF && !ZBOOT_ROM && EXPERIMENTAL
+	depends on OF && !ZBOOT_ROM
 	help
 	  With this option, the boot code will look for a device tree binary
 	  (DTB) appended to zImage
@@ -2076,7 +2086,7 @@ config XIP_PHYS_ADDR
 
 config KEXEC
 	bool "Kexec system call (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && (!SMP || HOTPLUG_CPU)
+	depends on (!SMP || HOTPLUG_CPU)
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
@@ -2098,7 +2108,6 @@ config ATAGS_PROC
 
 config CRASH_DUMP
 	bool "Build kdump crash kernel (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
 	help
 	  Generate crash dump after being started by kexec. This should
 	  be normally only set in special crash dump kernels which are
@@ -2165,7 +2174,7 @@ config CPU_FREQ_S3C
 
 config CPU_FREQ_S3C24XX
 	bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)"
-	depends on ARCH_S3C24XX && CPU_FREQ && EXPERIMENTAL
+	depends on ARCH_S3C24XX && CPU_FREQ
 	select CPU_FREQ_S3C
 	help
 	  This enables the CPUfreq driver for the Samsung S3C24XX family
@@ -2177,7 +2186,7 @@ config CPU_FREQ_S3C24XX
 
 config CPU_FREQ_S3C24XX_PLL
 	bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)"
-	depends on CPU_FREQ_S3C24XX && EXPERIMENTAL
+	depends on CPU_FREQ_S3C24XX
 	help
 	  Compile in support for changing the PLL frequency from the
 	  S3C24XX series CPUfreq driver. The PLL takes time to settle
@@ -2240,7 +2249,7 @@ config FPE_NWFPE_XP
 
 config FPE_FASTFPE
 	bool "FastFPE math emulation (EXPERIMENTAL)"
-	depends on (!AEABI || OABI_COMPAT) && !CPU_32v3 && EXPERIMENTAL
+	depends on (!AEABI || OABI_COMPAT) && !CPU_32v3
 	---help---
 	  Say Y here to include the FAST floating point emulator in the kernel.
 	  This is an experimental much faster emulator which now also has full
@@ -2322,3 +2331,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/arm/kvm/Kconfig"
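
The new CONFIG_ARM_PSCI option gates arch/arm/kernel/psci.c (see the diffstat above), which routes CPU power operations to firmware per ARM DEN 0022A: the PSCI function ID goes in r0, up to three arguments in r1-r3, and the call traps to the secure side. A minimal sketch of that conduit, for orientation only -- the function name and the bare "smc #0" encoding are illustrative assumptions, not code from this commit:

    /* Illustrative sketch of a PSCI-style SMC call; function IDs are
     * firmware-defined (taken from the device tree in this binding). */
    static u32 psci_smc_call(u32 fn, u32 arg0, u32 arg1, u32 arg2)
    {
    	register u32 r0 asm("r0") = fn;		/* PSCI function ID */
    	register u32 r1 asm("r1") = arg0;	/* e.g. target CPU for CPU_ON */
    	register u32 r2 asm("r2") = arg1;	/* e.g. secondary entry point */
    	register u32 r3 asm("r3") = arg2;	/* e.g. context ID */

    	asm volatile("smc #0"			/* trap to secure firmware */
    		     : "+r" (r0)
    		     : "r" (r1), "r" (r2), "r" (r3)
    		     : "memory");
    	return r0;				/* PSCI return/error code */
    }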
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 661030d6bc6c..fc2a591e1676 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -32,7 +32,7 @@ config FRAME_POINTER
 
 config ARM_UNWIND
 	bool "Enable stack unwinding support (EXPERIMENTAL)"
-	depends on AEABI && EXPERIMENTAL
+	depends on AEABI
 	default y
 	help
 	  This option enables stack unwinding support in the kernel
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 30c443c406f3..4bcd2d6b0535 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -252,6 +252,7 @@ core-$(CONFIG_FPE_NWFPE) += arch/arm/nwfpe/
 core-$(CONFIG_FPE_FASTFPE)	+= $(FASTFPE_OBJ)
 core-$(CONFIG_VFP)		+= arch/arm/vfp/
 core-$(CONFIG_XEN)		+= arch/arm/xen/
+core-$(CONFIG_KVM_ARM_HOST)	+= arch/arm/kvm/
 
 # If we have a machine-specific directory, then include it in the build.
 core-y				+= arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi
index 63f2fbcfe819..69140ba99f46 100644
--- a/arch/arm/boot/dts/dbx5x0.dtsi
+++ b/arch/arm/boot/dts/dbx5x0.dtsi
@@ -170,10 +170,9 @@
 			gpio-bank = <8>;
 		};
 
-		pinctrl@80157000 {
-			// This is actually the PRCMU base address
-			reg = <0x80157000 0x2000>;
-			compatible = "stericsson,nmk_pinctrl";
+		pinctrl {
+			compatible = "stericsson,nmk-pinctrl";
+			prcm = <&prcmu>;
 		};
 
 		usb@a03e0000 {
@@ -190,9 +189,10 @@
 			interrupts = <0 25 0x4>;
 		};
 
-		prcmu@80157000 {
+		prcmu: prcmu@80157000 {
 			compatible = "stericsson,db8500-prcmu";
 			reg = <0x80157000 0x1000>;
+			reg-names = "prcmu";
 			interrupts = <0 47 0x4>;
 			#address-cells = <1>;
 			#size-cells = <1>;
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index e05b18f3c33d..4db9db0a8443 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -49,6 +49,11 @@
 			compatible = "samsung,s524ad0xd1";
 			reg = <0x51>;
 		};
+
+		wm8994: wm8994@1a {
+			compatible = "wlf,wm8994";
+			reg = <0x1a>;
+		};
 	};
 
 	i2c@121D0000 {
@@ -204,4 +209,25 @@
 		samsung,mfc-r = <0x43000000 0x800000>;
 		samsung,mfc-l = <0x51000000 0x800000>;
 	};
+
+	i2s0: i2s@03830000 {
+		gpios = <&gpz 0 2 0 0>, <&gpz 1 2 0 0>, <&gpz 2 2 0 0>,
+			<&gpz 3 2 0 0>, <&gpz 4 2 0 0>, <&gpz 5 2 0 0>,
+			<&gpz 6 2 0 0>;
+	};
+
+	i2s1: i2s@12D60000 {
+		status = "disabled";
+	};
+
+	i2s2: i2s@12D70000 {
+		status = "disabled";
+	};
+
+	sound {
+		compatible = "samsung,smdk-wm8994";
+
+		samsung,i2s-controller = <&i2s0>;
+		samsung,audio-codec = <&wm8994>;
+	};
 };
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 3acf594ea60b..f50b4e854355 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -211,8 +211,9 @@
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x12d20000 0x100>;
 		interrupts = <0 66 0>;
-		tx-dma-channel = <&pdma0 5>; /* preliminary */
-		rx-dma-channel = <&pdma0 4>; /* preliminary */
+		dmas = <&pdma0 5
+			&pdma0 4>;
+		dma-names = "tx", "rx";
 		#address-cells = <1>;
 		#size-cells = <0>;
 	};
@@ -221,8 +222,9 @@
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x12d30000 0x100>;
 		interrupts = <0 67 0>;
-		tx-dma-channel = <&pdma1 5>; /* preliminary */
-		rx-dma-channel = <&pdma1 4>; /* preliminary */
+		dmas = <&pdma1 5
+			&pdma1 4>;
+		dma-names = "tx", "rx";
 		#address-cells = <1>;
 		#size-cells = <0>;
 	};
@@ -231,8 +233,9 @@
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x12d40000 0x100>;
 		interrupts = <0 68 0>;
-		tx-dma-channel = <&pdma0 7>; /* preliminary */
-		rx-dma-channel = <&pdma0 6>; /* preliminary */
+		dmas = <&pdma0 7
+			&pdma0 6>;
+		dma-names = "tx", "rx";
 		#address-cells = <1>;
 		#size-cells = <0>;
 	};
@@ -269,6 +272,35 @@
 		#size-cells = <0>;
 	};
 
+	i2s0: i2s@03830000 {
+		compatible = "samsung,i2s-v5";
+		reg = <0x03830000 0x100>;
+		dmas = <&pdma0 10
+			&pdma0 9
+			&pdma0 8>;
+		dma-names = "tx", "rx", "tx-sec";
+		samsung,supports-6ch;
+		samsung,supports-rstclr;
+		samsung,supports-secdai;
+		samsung,idma-addr = <0x03000000>;
+	};
+
+	i2s1: i2s@12D60000 {
+		compatible = "samsung,i2s-v5";
+		reg = <0x12D60000 0x100>;
+		dmas = <&pdma1 12
+			&pdma1 11>;
+		dma-names = "tx", "rx";
+	};
+
+	i2s2: i2s@12D70000 {
+		compatible = "samsung,i2s-v5";
+		reg = <0x12D70000 0x100>;
+		dmas = <&pdma0 12
+			&pdma0 11>;
+		dma-names = "tx", "rx";
+	};
+
 	amba {
 		#address-cells = <1>;
 		#size-cells = <1>;
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts
index 5927a8df5625..6aad34ad9517 100644
--- a/arch/arm/boot/dts/highbank.dts
+++ b/arch/arm/boot/dts/highbank.dts
@@ -37,6 +37,16 @@
 			next-level-cache = <&L2>;
 			clocks = <&a9pll>;
 			clock-names = "cpu";
+			operating-points = <
+				/* kHz    ignored */
+				1300000   1000000
+				1200000   1000000
+				1100000   1000000
+				 800000   1000000
+				 400000   1000000
+				 200000   1000000
+			>;
+			clock-latency = <100000>;
 		};
 
 		cpu@901 {
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index 65415c598a5e..56afcf41aae0 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -391,7 +391,9 @@
 			};
 
 			lradc@80050000 {
+				compatible = "fsl,imx23-lradc";
 				reg = <0x80050000 0x2000>;
+				interrupts = <36 37 38 39 40 41 42 43 44>;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 055fca542120..3329719a9412 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -58,10 +58,11 @@
 			#size-cells = <1>;
 			ranges = <0x88000000 0x88000000 0x40000>;
 
-			clock-controller@88000000 {
+			clks: clock-controller@88000000 {
 				compatible = "sirf,prima2-clkc";
 				reg = <0x88000000 0x1000>;
 				interrupts = <3>;
+				#clock-cells = <1>;
 			};
 
 			reset-controller@88010000 {
@@ -85,6 +86,7 @@
 				compatible = "sirf,prima2-memc";
 				reg = <0x90000000 0x10000>;
 				interrupts = <27>;
+				clocks = <&clks 5>;
 			};
 		};
 
@@ -104,6 +106,7 @@
 				compatible = "sirf,prima2-vpp";
 				reg = <0x90020000 0x10000>;
 				interrupts = <31>;
+				clocks = <&clks 35>;
 			};
 		};
 
@@ -117,6 +120,7 @@
 				compatible = "powervr,sgx531";
 				reg = <0x98000000 0x8000000>;
 				interrupts = <6>;
+				clocks = <&clks 32>;
 			};
 		};
 
@@ -130,6 +134,7 @@
 				compatible = "sirf,prima2-video-codec";
 				reg = <0xa0000000 0x8000000>;
 				interrupts = <5>;
+				clocks = <&clks 33>;
 			};
 		};
 
@@ -149,12 +154,14 @@
 				compatible = "sirf,prima2-gps";
 				reg = <0xa8010000 0x10000>;
 				interrupts = <7>;
+				clocks = <&clks 9>;
 			};
 
 			dsp@a9000000 {
 				compatible = "sirf,prima2-dsp";
 				reg = <0xa9000000 0x1000000>;
 				interrupts = <8>;
+				clocks = <&clks 8>;
 			};
 		};
 
@@ -174,12 +181,14 @@
 				compatible = "sirf,prima2-nand";
 				reg = <0xb0030000 0x10000>;
 				interrupts = <41>;
+				clocks = <&clks 26>;
 			};
 
 			audio@b0040000 {
 				compatible = "sirf,prima2-audio";
 				reg = <0xb0040000 0x10000>;
 				interrupts = <35>;
+				clocks = <&clks 27>;
 			};
 
 			uart0: uart@b0050000 {
@@ -187,6 +196,7 @@
 				compatible = "sirf,prima2-uart";
 				reg = <0xb0050000 0x10000>;
 				interrupts = <17>;
+				clocks = <&clks 13>;
 			};
 
 			uart1: uart@b0060000 {
@@ -194,6 +204,7 @@
 				compatible = "sirf,prima2-uart";
 				reg = <0xb0060000 0x10000>;
 				interrupts = <18>;
+				clocks = <&clks 14>;
 			};
 
 			uart2: uart@b0070000 {
@@ -201,6 +212,7 @@
 				compatible = "sirf,prima2-uart";
 				reg = <0xb0070000 0x10000>;
 				interrupts = <19>;
+				clocks = <&clks 15>;
 			};
 
 			usp0: usp@b0080000 {
@@ -208,6 +220,7 @@
 				compatible = "sirf,prima2-usp";
 				reg = <0xb0080000 0x10000>;
 				interrupts = <20>;
+				clocks = <&clks 28>;
 			};
 
 			usp1: usp@b0090000 {
@@ -215,6 +228,7 @@
 				compatible = "sirf,prima2-usp";
 				reg = <0xb0090000 0x10000>;
 				interrupts = <21>;
+				clocks = <&clks 29>;
 			};
 
 			usp2: usp@b00a0000 {
@@ -222,6 +236,7 @@
 				compatible = "sirf,prima2-usp";
 				reg = <0xb00a0000 0x10000>;
 				interrupts = <22>;
+				clocks = <&clks 30>;
 			};
 
 			dmac0: dma-controller@b00b0000 {
@@ -229,6 +244,7 @@
 				compatible = "sirf,prima2-dmac";
 				reg = <0xb00b0000 0x10000>;
 				interrupts = <12>;
+				clocks = <&clks 24>;
 			};
 
 			dmac1: dma-controller@b0160000 {
@@ -236,11 +252,13 @@
 				compatible = "sirf,prima2-dmac";
 				reg = <0xb0160000 0x10000>;
 				interrupts = <13>;
+				clocks = <&clks 25>;
 			};
 
 			vip@b00C0000 {
 				compatible = "sirf,prima2-vip";
 				reg = <0xb00C0000 0x10000>;
+				clocks = <&clks 31>;
 			};
 
 			spi0: spi@b00d0000 {
@@ -248,6 +266,7 @@
 				compatible = "sirf,prima2-spi";
 				reg = <0xb00d0000 0x10000>;
 				interrupts = <15>;
+				clocks = <&clks 19>;
 			};
 
 			spi1: spi@b0170000 {
@@ -255,6 +274,7 @@
 				compatible = "sirf,prima2-spi";
 				reg = <0xb0170000 0x10000>;
 				interrupts = <16>;
+				clocks = <&clks 20>;
 			};
 
 			i2c0: i2c@b00e0000 {
@@ -262,6 +282,7 @@
 				compatible = "sirf,prima2-i2c";
 				reg = <0xb00e0000 0x10000>;
 				interrupts = <24>;
+				clocks = <&clks 17>;
 			};
 
 			i2c1: i2c@b00f0000 {
@@ -269,12 +290,14 @@
 				compatible = "sirf,prima2-i2c";
 				reg = <0xb00f0000 0x10000>;
 				interrupts = <25>;
+				clocks = <&clks 18>;
 			};
 
 			tsc@b0110000 {
 				compatible = "sirf,prima2-tsc";
 				reg = <0xb0110000 0x10000>;
 				interrupts = <33>;
+				clocks = <&clks 16>;
 			};
 
 			gpio: pinctrl@b0120000 {
@@ -507,17 +530,20 @@
 			pwm@b0130000 {
 				compatible = "sirf,prima2-pwm";
 				reg = <0xb0130000 0x10000>;
+				clocks = <&clks 21>;
 			};
 
 			efusesys@b0140000 {
 				compatible = "sirf,prima2-efuse";
 				reg = <0xb0140000 0x10000>;
+				clocks = <&clks 22>;
 			};
 
 			pulsec@b0150000 {
 				compatible = "sirf,prima2-pulsec";
 				reg = <0xb0150000 0x10000>;
 				interrupts = <48>;
+				clocks = <&clks 23>;
 			};
 
 			pci-iobg {
@@ -616,12 +642,14 @@
 				compatible = "chipidea,ci13611a-prima2";
 				reg = <0xb8000000 0x10000>;
 				interrupts = <10>;
+				clocks = <&clks 40>;
 			};
 
 			usb1: usb@b00f0000 {
 				compatible = "chipidea,ci13611a-prima2";
 				reg = <0xb8010000 0x10000>;
 				interrupts = <11>;
+				clocks = <&clks 41>;
 			};
 
 			sata@b00f0000 {
@@ -634,6 +662,7 @@
 				compatible = "sirf,prima2-security";
 				reg = <0xb8030000 0x10000>;
 				interrupts = <42>;
+				clocks = <&clks 7>;
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index e61fdd47bd01..f99f60dadf5d 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -16,4 +16,34 @@
 	memory {
 		reg = <0x40000000 0x80000000>;
 	};
+
+	soc {
+		pinctrl@01c20800 {
+			compatible = "allwinner,sun4i-a10-pinctrl";
+			reg = <0x01c20800 0x400>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			uart0_pins_a: uart0@0 {
+				allwinner,pins = "PB22", "PB23";
+				allwinner,function = "uart0";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+
+			uart0_pins_b: uart0@1 {
+				allwinner,pins = "PF2", "PF4";
+				allwinner,function = "uart0";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+
+			uart1_pins_a: uart1@0 {
+				allwinner,pins = "PA10", "PA11";
+				allwinner,function = "uart1";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+		};
+	};
 };
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index 498a091a4ea2..4a1e45d4aace 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -24,6 +24,8 @@
 
 	soc {
 		uart1: uart@01c28400 {
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart1_pins_b>;
 			status = "okay";
 		};
 	};
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 59a2d265a98e..e1121890fb29 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -17,4 +17,27 @@
 	memory {
 		reg = <0x40000000 0x20000000>;
 	};
+
+	soc {
+		pinctrl@01c20800 {
+			compatible = "allwinner,sun5i-a13-pinctrl";
+			reg = <0x01c20800 0x400>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			uart1_pins_a: uart1@0 {
+				allwinner,pins = "PE10", "PE11";
+				allwinner,function = "uart1";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+
+			uart1_pins_b: uart1@1 {
+				allwinner,pins = "PG3", "PG4";
+				allwinner,function = "uart1";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+		};
+	};
 };
diff --git a/arch/arm/boot/dts/vt8500.dtsi b/arch/arm/boot/dts/vt8500.dtsi
index d8645e990b21..cf31ced46602 100644
--- a/arch/arm/boot/dts/vt8500.dtsi
+++ b/arch/arm/boot/dts/vt8500.dtsi
@@ -45,6 +45,38 @@
 			compatible = "fixed-clock";
 			clock-frequency = <24000000>;
 		};
+
+		clkuart0: uart0 {
+			#clock-cells = <0>;
+			compatible = "via,vt8500-device-clock";
+			clocks = <&ref24>;
+			enable-reg = <0x250>;
+			enable-bit = <1>;
+		};
+
+		clkuart1: uart1 {
+			#clock-cells = <0>;
+			compatible = "via,vt8500-device-clock";
+			clocks = <&ref24>;
+			enable-reg = <0x250>;
+			enable-bit = <2>;
+		};
+
+		clkuart2: uart2 {
+			#clock-cells = <0>;
+			compatible = "via,vt8500-device-clock";
+			clocks = <&ref24>;
+			enable-reg = <0x250>;
+			enable-bit = <3>;
+		};
+
+		clkuart3: uart3 {
+			#clock-cells = <0>;
+			compatible = "via,vt8500-device-clock";
+			clocks = <&ref24>;
+			enable-reg = <0x250>;
+			enable-bit = <4>;
+		};
 	};
 };
 
@@ -83,28 +115,28 @@
 		compatible = "via,vt8500-uart";
 		reg = <0xd8200000 0x1040>;
 		interrupts = <32>;
-		clocks = <&ref24>;
+		clocks = <&clkuart0>;
 	};
 
 	uart@d82b0000 {
 		compatible = "via,vt8500-uart";
 		reg = <0xd82b0000 0x1040>;
 		interrupts = <33>;
-		clocks = <&ref24>;
+		clocks = <&clkuart1>;
 	};
 
 	uart@d8210000 {
 		compatible = "via,vt8500-uart";
 		reg = <0xd8210000 0x1040>;
 		interrupts = <47>;
-		clocks = <&ref24>;
+		clocks = <&clkuart2>;
 	};
 
 	uart@d82c0000 {
 		compatible = "via,vt8500-uart";
 		reg = <0xd82c0000 0x1040>;
 		interrupts = <50>;
-		clocks = <&ref24>;
+		clocks = <&clkuart3>;
 	};
 
 	rtc@d8100000 {
diff --git a/arch/arm/boot/dts/wm8505.dtsi b/arch/arm/boot/dts/wm8505.dtsi
index 330f833ac3b0..e74a1c0fb9a2 100644
--- a/arch/arm/boot/dts/wm8505.dtsi
+++ b/arch/arm/boot/dts/wm8505.dtsi
@@ -59,6 +59,54 @@
 			compatible = "fixed-clock";
 			clock-frequency = <24000000>;
 		};
+
+		clkuart0: uart0 {
+			#clock-cells = <0>;
+			compatible = "via,vt8500-device-clock";
+			clocks = <&ref24>;
+			enable-reg = <0x250>;
+			enable-bit = <1>;
+		};
+
+		clkuart1: uart1 {
+			#clock-cells = <0>;
+			compatible = "via,vt8500-device-clock";
+			clocks = <&ref24>;
+			enable-reg = <0x250>;
+			enable-bit = <2>;
+		};
+
+		clkuart2: uart2 {
+			#clock-cells = <0>;
+			compatible = "via,vt8500-device-clock";
+			clocks = <&ref24>;
+			enable-reg = <0x250>;
+			enable-bit = <3>;
+		};
+
+		clkuart3: uart3 {
+			#clock-cells = <0>;
+			compatible = "via,vt8500-device-clock";
+			clocks = <&ref24>;
+			enable-reg = <0x250>;
+			enable-bit = <4>;
+		};
+
+		clkuart4: uart4 {
+			#clock-cells = <0>;
+			compatible = "via,vt8500-device-clock";
+			clocks = <&ref24>;
+			enable-reg = <0x250>;
+			enable-bit = <22>;
+		};
+
+		clkuart5: uart5 {
+			#clock-cells = <0>;
+			compatible = "via,vt8500-device-clock";
+			clocks = <&ref24>;
+			enable-reg = <0x250>;
+			enable-bit = <23>;
+		};
 	};
 };
 
@@ -96,42 +144,42 @@
 		compatible = "via,vt8500-uart";
 		reg = <0xd8200000 0x1040>;
 		interrupts = <32>;
-		clocks = <&ref24>;
+		clocks = <&clkuart0>;
 	};
 
 	uart@d82b0000 {
 		compatible = "via,vt8500-uart";
 		reg = <0xd82b0000 0x1040>;
 		interrupts = <33>;
-		clocks = <&ref24>;
+		clocks = <&clkuart1>;
 	};
 
 	uart@d8210000 {
 		compatible = "via,vt8500-uart";
 		reg = <0xd8210000 0x1040>;
 		interrupts = <47>;
-		clocks = <&ref24>;
+		clocks = <&clkuart2>;
 	};
 
 	uart@d82c0000 {
 		compatible = "via,vt8500-uart";
 		reg = <0xd82c0000 0x1040>;
 		interrupts = <50>;
-		clocks = <&ref24>;
+		clocks = <&clkuart3>;
 	};
 
 	uart@d8370000 {
 		compatible = "via,vt8500-uart";
 		reg = <0xd8370000 0x1040>;
 		interrupts = <31>;
-		clocks = <&ref24>;
+		clocks = <&clkuart4>;
 	};
 
 	uart@d8380000 {
 		compatible = "via,vt8500-uart";
 		reg = <0xd8380000 0x1040>;
 		interrupts = <30>;
-		clocks = <&ref24>;
+		clocks = <&clkuart5>;
 	};
 
 	rtc@d8100000 {
diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi
index 83b9467559bb..db3c0a12e052 100644
--- a/arch/arm/boot/dts/wm8650.dtsi
+++ b/arch/arm/boot/dts/wm8650.dtsi
@@ -75,6 +75,22 @@
 			reg = <0x204>;
 		};
 
+		clkuart0: uart0 {
+			#clock-cells = <0>;
+			compatible = "via,vt8500-device-clock";
+			clocks = <&ref24>;
+			enable-reg = <0x250>;
+			enable-bit = <1>;
+		};
+
+		clkuart1: uart1 {
+			#clock-cells = <0>;
+			compatible = "via,vt8500-device-clock";
+			clocks = <&ref24>;
+			enable-reg = <0x250>;
+			enable-bit = <2>;
+		};
+
 		arm: arm {
 			#clock-cells = <0>;
 			compatible = "via,vt8500-device-clock";
@@ -128,14 +144,14 @@
 			compatible = "via,vt8500-uart";
 			reg = <0xd8200000 0x1040>;
 			interrupts = <32>;
-			clocks = <&ref24>;
+			clocks = <&clkuart0>;
 		};
 
 		uart@d82b0000 {
 			compatible = "via,vt8500-uart";
 			reg = <0xd82b0000 0x1040>;
 			interrupts = <33>;
-			clocks = <&ref24>;
+			clocks = <&clkuart1>;
 		};
 
 		rtc@d8100000 {
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi
index 401c1262d4ed..5914b5654591 100644
--- a/arch/arm/boot/dts/zynq-7000.dtsi
+++ b/arch/arm/boot/dts/zynq-7000.dtsi
@@ -44,14 +44,14 @@
 			compatible = "xlnx,xuartps";
 			reg = <0xE0000000 0x1000>;
 			interrupts = <0 27 4>;
-			clock = <50000000>;
+			clocks = <&uart_clk 0>;
 		};
 
 		uart1: uart@e0001000 {
 			compatible = "xlnx,xuartps";
 			reg = <0xE0001000 0x1000>;
 			interrupts = <0 50 4>;
-			clock = <50000000>;
+			clocks = <&uart_clk 1>;
 		};
 
 		slcr: slcr@f8000000 {
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 36ae03a3f5d1..87dfa9026c5b 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
 	irq_set_chained_handler(irq, gic_handle_cascade_irq);
 }
 
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+	void __iomem *base = gic_data_dist_base(gic);
+	u32 mask, i;
+
+	for (i = mask = 0; i < 32; i += 4) {
+		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+		mask |= mask >> 16;
+		mask |= mask >> 8;
+		if (mask)
+			break;
+	}
+
+	if (!mask)
+		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+	return mask;
+}
+
 static void __init gic_dist_init(struct gic_chip_data *gic)
 {
 	unsigned int i;
@@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
 	/*
 	 * Set all global interrupts to this CPU only.
 	 */
-	cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
+	cpumask = gic_get_cpumask(gic);
+	cpumask |= cpumask << 8;
+	cpumask |= cpumask << 16;
 	for (i = 32; i < gic_irqs; i += 4)
 		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
@@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
 	 * Get what the GIC says our CPU mask is.
 	 */
 	BUG_ON(cpu >= NR_GIC_CPU_IF);
-	cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
+	cpu_mask = gic_get_cpumask(gic);
 	gic_cpu_map[cpu] = cpu_mask;
 
 	/*
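
A worked example of the byte folding used above (illustrative; not part of the patch). Each 32-bit GIC_DIST_TARGET word packs four 8-bit per-interrupt target fields, so gic_get_cpumask() ORs the four bytes together to recover this CPU's interface bit, and gic_dist_init() then rebroadcasts that mask to all four byte lanes before writing the SPI target registers:

    u32 target = 0x01010101;	/* four fields, each targeting CPU interface 0 */
    target |= target >> 16;	/* fold upper half into lower half             */
    target |= target >> 8;	/* bits 7:0 now OR together all four bytes     */
    u8 mask = target;		/* 0x01                                        */

    u32 cpumask = mask;		/* rebroadcast for the per-word SPI writes ... */
    cpumask |= cpumask << 8;	/* 0x00000101                                  */
    cpumask |= cpumask << 16;	/* 0x01010101                                  */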
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index e59b1d505d6c..19d6cd6f29f9 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -34,8 +34,9 @@
 @ A little glue here to select the correct code below for the ARM CPU
 @ that is being targetted.
 
+#include <linux/linkage.h>
+
 .text
-.code	32
 
 .type	AES_Te,%object
 .align	5
@@ -145,10 +146,8 @@ AES_Te:
 
 @ void AES_encrypt(const unsigned char *in, unsigned char *out,
 @ 		const AES_KEY *key) {
-.global AES_encrypt
-.type   AES_encrypt,%function
 .align	5
-AES_encrypt:
+ENTRY(AES_encrypt)
 	sub	r3,pc,#8		@ AES_encrypt
 	stmdb   sp!,{r1,r4-r12,lr}
 	mov	r12,r0		@ inp
@@ -239,15 +238,8 @@ AES_encrypt:
 	strb	r6,[r12,#14]
 	strb	r3,[r12,#15]
 #endif
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_encrypt,.-AES_encrypt
+ENDPROC(AES_encrypt)
 
 .type   _armv4_AES_encrypt,%function
 .align	2
@@ -386,10 +378,8 @@ _armv4_AES_encrypt:
 	ldr	pc,[sp],#4		@ pop and return
 .size	_armv4_AES_encrypt,.-_armv4_AES_encrypt
 
-.global private_AES_set_encrypt_key
-.type   private_AES_set_encrypt_key,%function
 .align	5
-private_AES_set_encrypt_key:
+ENTRY(private_AES_set_encrypt_key)
 _armv4_AES_set_encrypt_key:
 	sub	r3,pc,#8		@ AES_set_encrypt_key
 	teq	r0,#0
@@ -658,15 +648,11 @@ _armv4_AES_set_encrypt_key:
 
 .Ldone:	mov	r0,#0
 	ldmia   sp!,{r4-r12,lr}
-.Labrt:	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+.Labrt:	mov	pc,lr
+ENDPROC(private_AES_set_encrypt_key)
 
-.global private_AES_set_decrypt_key
-.type   private_AES_set_decrypt_key,%function
 .align	5
-private_AES_set_decrypt_key:
+ENTRY(private_AES_set_decrypt_key)
 	str	lr,[sp,#-4]!		@ push lr
 #if 0
 	@ kernel does both of these in setkey so optimise this bit out by
@@ -748,15 +734,8 @@ private_AES_set_decrypt_key:
 	bne	.Lmix
 
 	mov	r0,#0
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+ENDPROC(private_AES_set_decrypt_key)
 
 .type	AES_Td,%object
 .align	5
@@ -862,10 +841,8 @@ AES_Td:
 
 @ void AES_decrypt(const unsigned char *in, unsigned char *out,
 @ 		const AES_KEY *key) {
-.global AES_decrypt
-.type   AES_decrypt,%function
 .align	5
-AES_decrypt:
+ENTRY(AES_decrypt)
 	sub	r3,pc,#8		@ AES_decrypt
 	stmdb   sp!,{r1,r4-r12,lr}
 	mov	r12,r0		@ inp
@@ -956,15 +933,8 @@ AES_decrypt:
 	strb	r6,[r12,#14]
 	strb	r3,[r12,#15]
 #endif
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_decrypt,.-AES_decrypt
+ENDPROC(AES_decrypt)
 
 .type   _armv4_AES_decrypt,%function
 .align	2
@@ -1064,7 +1034,9 @@ _armv4_AES_decrypt:
 	and	r9,lr,r1,lsr#8
 
 	ldrb	r7,[r10,r7]		@ Td4[s1>>0]
-	ldrb	r1,[r10,r1,lsr#24]	@ Td4[s1>>24]
+ ARM(	ldrb	r1,[r10,r1,lsr#24]  )	@ Td4[s1>>24]
+ THUMB(	add	r1,r10,r1,lsr#24   )	@ Td4[s1>>24]
+ THUMB(	ldrb	r1,[r1]		   )
 	ldrb	r8,[r10,r8]		@ Td4[s1>>16]
 	eor	r0,r7,r0,lsl#24
 	ldrb	r9,[r10,r9]		@ Td4[s1>>8]
@@ -1077,7 +1049,9 @@ _armv4_AES_decrypt:
 	ldrb	r8,[r10,r8]		@ Td4[s2>>0]
 	and	r9,lr,r2,lsr#16
 
-	ldrb	r2,[r10,r2,lsr#24]	@ Td4[s2>>24]
+ ARM(	ldrb	r2,[r10,r2,lsr#24]  )	@ Td4[s2>>24]
+ THUMB(	add	r2,r10,r2,lsr#24   )	@ Td4[s2>>24]
+ THUMB(	ldrb	r2,[r2]		   )
 	eor	r0,r0,r7,lsl#8
 	ldrb	r9,[r10,r9]		@ Td4[s2>>16]
 	eor	r1,r8,r1,lsl#16
@@ -1090,7 +1064,9 @@ _armv4_AES_decrypt:
 	and	r9,lr,r3		@ i2
 
 	ldrb	r9,[r10,r9]		@ Td4[s3>>0]
-	ldrb	r3,[r10,r3,lsr#24]	@ Td4[s3>>24]
+ ARM(	ldrb	r3,[r10,r3,lsr#24]  )	@ Td4[s3>>24]
+ THUMB(	add	r3,r10,r3,lsr#24   )	@ Td4[s3>>24]
+ THUMB(	ldrb	r3,[r3]		   )
 	eor	r0,r0,r7,lsl#16
 	ldr	r7,[r11,#0]
 	eor	r1,r1,r8,lsl#8
diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S
index 7050ab133b9d..92c6eed7aac9 100644
--- a/arch/arm/crypto/sha1-armv4-large.S
+++ b/arch/arm/crypto/sha1-armv4-large.S
@@ -51,13 +51,12 @@
 @ Profiler-assisted and platform-specific optimization resulted in 10%
 @ improvement on Cortex A8 core and 12.2 cycles per byte.
 
-.text
+#include <linux/linkage.h>
 
-.global	sha1_block_data_order
-.type	sha1_block_data_order,%function
+.text
 
 .align	2
-sha1_block_data_order:
+ENTRY(sha1_block_data_order)
 	stmdb	sp!,{r4-r12,lr}
 	add	r2,r1,r2,lsl#6	@ r2 to point at the end of r1
 	ldmia	r0,{r3,r4,r5,r6,r7}
@@ -194,7 +193,7 @@ sha1_block_data_order:
 	eor	r10,r10,r7,ror#2	@ F_00_19(B,C,D)
 	str	r9,[r14,#-4]!
 	add	r3,r3,r10		@ E+=F_00_19(B,C,D)
-	teq	r14,sp
+	cmp	r14,sp
 	bne	.L_00_15		@ [((11+4)*5+2)*3]
 #if __ARM_ARCH__<7
 	ldrb	r10,[r1,#2]
@@ -374,7 +373,9 @@ sha1_block_data_order:
 	@ F_xx_xx
 	add	r3,r3,r9		@ E+=X[i]
 	add	r3,r3,r10		@ E+=F_20_39(B,C,D)
-	teq	r14,sp			@ preserve carry
+ ARM(	teq	r14,sp		)	@ preserve carry
+ THUMB(	mov	r11,sp		)
+ THUMB(	teq	r14,r11		)	@ preserve carry
 	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
 	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes
 
@@ -466,7 +467,7 @@ sha1_block_data_order:
 	add	r3,r3,r9		@ E+=X[i]
 	add	r3,r3,r10		@ E+=F_40_59(B,C,D)
 	add	r3,r3,r11,ror#2
-	teq	r14,sp
+	cmp	r14,sp
 	bne	.L_40_59		@ [+((12+5)*5+2)*4]
 
 	ldr	r8,.LK_60_79
@@ -485,19 +486,12 @@ sha1_block_data_order:
 	teq	r1,r2
 	bne	.Lloop			@ [+18], total 1307
 
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia	sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
 .align	2
 .LK_00_19:	.word	0x5a827999
 .LK_20_39:	.word	0x6ed9eba1
 .LK_40_59:	.word	0x8f1bbcdc
 .LK_60_79:	.word	0xca62c1d6
-.size	sha1_block_data_order,.-sha1_block_data_order
+ENDPROC(sha1_block_data_order)
 .asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
 .align	2
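
Both crypto files above replace hand-rolled symbol bookkeeping and the ARMv4 interworking return stub (tst lr,#1 / moveq pc,lr / .word 0xe12fff1e) with the kernel's linkage macros, which is what lets this code also assemble as Thumb-2; the ARM()/THUMB() wrappers cover the remaining ARM-only addressing modes. Roughly, as a simplified sketch of <linux/linkage.h> (not a verbatim expansion):

    @ ENTRY(func)    ->  .globl func ; .align ; func:
    @ ENDPROC(func)  ->  .type func, %function ; .size func, . - func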
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index eb87200aa4b5..05ee9eebad6b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -246,18 +246,14 @@
  *
  * This macro is intended for forcing the CPU into SVC mode at boot time.
  * you cannot return to the original mode.
- *
- * Beware, it also clobers LR.
  */
 .macro safe_svcmode_maskall reg:req
 #if __LINUX_ARM_ARCH__ >= 6
 	mrs	\reg , cpsr
-	mov	lr , \reg
-	and	lr , lr , #MODE_MASK
-	cmp	lr , #HYP_MODE
-	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT
+	eor	\reg, \reg, #HYP_MODE
+	tst	\reg, #MODE_MASK
 	bic	\reg , \reg , #MODE_MASK
-	orr	\reg , \reg , #SVC_MODE
+	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
 THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 	bne	1f
 	orr	\reg, \reg, #PSR_A_BIT
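
A worked example of the rewritten mode test (illustrative; the values are the architectural PSR mode encodings: HYP_MODE = 0x1a, SVC_MODE = 0x13, MODE_MASK = 0x1f):

    @ entered in HYP:  mode ^ 0x1a = 0x00 -> TST #MODE_MASK sets Z,
    @                  so "bne 1f" falls through to the HYP/ELR_hyp path
    @ entered in SVC:  0x13 ^ 0x1a = 0x09 -> Z clear -> branch to 1f
    @ unlike the deleted cmp sequence, no scratch register (lr) is clobbered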
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index a59dcb5ab5fc..ad41ec2471e8 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -64,6 +64,24 @@ extern unsigned int processor_id;
 #define read_cpuid_ext(reg) 0
 #endif
 
+#define ARM_CPU_IMP_ARM			0x41
+#define ARM_CPU_IMP_INTEL		0x69
+
+#define ARM_CPU_PART_ARM1136		0xB360
+#define ARM_CPU_PART_ARM1156		0xB560
+#define ARM_CPU_PART_ARM1176		0xB760
+#define ARM_CPU_PART_ARM11MPCORE	0xB020
+#define ARM_CPU_PART_CORTEX_A8		0xC080
+#define ARM_CPU_PART_CORTEX_A9		0xC090
+#define ARM_CPU_PART_CORTEX_A5		0xC050
+#define ARM_CPU_PART_CORTEX_A15		0xC0F0
+#define ARM_CPU_PART_CORTEX_A7		0xC070
+
+#define ARM_CPU_XSCALE_ARCH_MASK	0xe000
+#define ARM_CPU_XSCALE_ARCH_V1		0x2000
+#define ARM_CPU_XSCALE_ARCH_V2		0x4000
+#define ARM_CPU_XSCALE_ARCH_V3		0x6000
+
 /*
  * The CPU ID never changes at run time, so we might as well tell the
  * compiler that it's constant.  Use this function to read the CPU ID
@@ -74,6 +92,21 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
 	return read_cpuid(CPUID_ID);
 }
 
+static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
+{
+	return (read_cpuid_id() & 0xFF000000) >> 24;
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
+{
+	return read_cpuid_id() & 0xFFF0;
+}
+
+static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
+{
+	return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK;
+}
+
 static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
 {
 	return read_cpuid(CPUID_CACHETYPE);
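
A hypothetical use of the new accessors, built only from the helpers and part-number constants defined in the hunk above (not itself a hunk from this commit):

    static inline bool cpu_is_cortex_a15(void)
    {
    	return read_cpuid_implementor() == ARM_CPU_IMP_ARM &&
    	       read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15;
    }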
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
index f2e5cad3f306..2381199acb7d 100644
--- a/arch/arm/include/asm/cti.h
+++ b/arch/arm/include/asm/cti.h
@@ -2,6 +2,7 @@
 #define __ASMARM_CTI_H
 
 #include	<asm/io.h>
+#include	<asm/hardware/coresight.h>
 
 /* The registers' definition is from section 3.2 of
  * Embedded Cross Trigger Revision: r0p0
@@ -35,11 +36,6 @@
 #define		LOCKACCESS		0xFB0
 #define		LOCKSTATUS		0xFB4
 
-/* write this value to LOCKACCESS will unlock the module, and
- * other value will lock the module
- */
-#define		LOCKCODE		0xC5ACCE55
-
 /**
  * struct cti - cross trigger interface struct
  * @base: mapped virtual address for the cti base
@@ -146,7 +142,7 @@ static inline void cti_irq_ack(struct cti *cti)
  */
 static inline void cti_unlock(struct cti *cti)
 {
-	__raw_writel(LOCKCODE, cti->base + LOCKACCESS);
+	__raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);
 }
 
 /**
@@ -158,6 +154,6 @@ static inline void cti_unlock(struct cti *cti)
  */
 static inline void cti_lock(struct cti *cti)
 {
-	__raw_writel(~LOCKCODE, cti->base + LOCKACCESS);
+	__raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);
 }
 #endif
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 7ecd793b8f5a..0cf7a6b842ff 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -36,7 +36,7 @@
36/* CoreSight Component Registers */ 36/* CoreSight Component Registers */
37#define CSCR_CLASS 0xff4 37#define CSCR_CLASS 0xff4
38 38
39#define UNLOCK_MAGIC 0xc5acce55 39#define CS_LAR_KEY 0xc5acce55
40 40
41/* ETM control register, "ETM Architecture", 3.3.1 */ 41/* ETM control register, "ETM Architecture", 3.3.1 */
42#define ETMR_CTRL 0 42#define ETMR_CTRL 0
@@ -147,11 +147,11 @@
147 147
148#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0) 148#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
149#define etm_unlock(t) \ 149#define etm_unlock(t) \
150 do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0) 150 do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
151 151
152#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0) 152#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
153#define etb_unlock(t) \ 153#define etb_unlock(t) \
154 do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0) 154 do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
155 155
156#endif /* __ASM_HARDWARE_CORESIGHT_H */ 156#endif /* __ASM_HARDWARE_CORESIGHT_H */
157 157
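The rename from UNLOCK_MAGIC to CS_LAR_KEY reflects that this value is the architected CoreSight Lock Access Register key, shared by CTI, ETM and ETB alike: writing the key unlocks a component, writing anything else locks it again. A minimal sketch of that shared pattern, assuming a mapped component base and using the 0xFB0 offset from cti.h above (the helper names are hypothetical):

    #include <asm/io.h>
    #include <asm/hardware/coresight.h>

    #define CS_LOCKACCESS	0xFB0	/* architected lock access offset */

    static void cs_unlock(void __iomem *base)
    {
    	__raw_writel(CS_LAR_KEY, base + CS_LOCKACCESS);	/* key unlocks */
    }

    static void cs_lock(void __iomem *base)
    {
    	__raw_writel(~CS_LAR_KEY, base + CS_LOCKACCESS);	/* non-key locks */
    }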
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index 01169dd723f1..eef55ea9ef00 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -85,6 +85,9 @@ static inline void decode_ctrl_reg(u32 reg,
85#define ARM_DSCR_HDBGEN (1 << 14) 85#define ARM_DSCR_HDBGEN (1 << 14)
86#define ARM_DSCR_MDBGEN (1 << 15) 86#define ARM_DSCR_MDBGEN (1 << 15)
87 87
88/* OSLSR OS lock model bits */
89#define ARM_OSLSR_OSLM0 (1 << 0)
90
88/* opcode2 numbers for the co-processor instructions. */ 91/* opcode2 numbers for the co-processor instructions. */
89#define ARM_OP2_BVR 4 92#define ARM_OP2_BVR 4
90#define ARM_OP2_BCR 5 93#define ARM_OP2_BCR 5
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
index bf863edb517d..1a66f907e5cc 100644
--- a/arch/arm/include/asm/idmap.h
+++ b/arch/arm/include/asm/idmap.h
@@ -8,6 +8,7 @@
8#define __idmap __section(.idmap.text) noinline notrace 8#define __idmap __section(.idmap.text) noinline notrace
9 9
10extern pgd_t *idmap_pgd; 10extern pgd_t *idmap_pgd;
11extern pgd_t *hyp_pgd;
11 12
12void setup_mm_for_reboot(void); 13void setup_mm_for_reboot(void);
13 14
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
new file mode 100644
index 000000000000..7c3d813e15df
--- /dev/null
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -0,0 +1,214 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_ARM_H__
20#define __ARM_KVM_ARM_H__
21
22#include <linux/types.h>
23
24/* Hyp Configuration Register (HCR) bits */
25#define HCR_TGE (1 << 27)
26#define HCR_TVM (1 << 26)
27#define HCR_TTLB (1 << 25)
28#define HCR_TPU (1 << 24)
29#define HCR_TPC (1 << 23)
30#define HCR_TSW (1 << 22)
31#define HCR_TAC (1 << 21)
32#define HCR_TIDCP (1 << 20)
33#define HCR_TSC (1 << 19)
34#define HCR_TID3 (1 << 18)
35#define HCR_TID2 (1 << 17)
36#define HCR_TID1 (1 << 16)
37#define HCR_TID0 (1 << 15)
38#define HCR_TWE (1 << 14)
39#define HCR_TWI (1 << 13)
40#define HCR_DC (1 << 12)
41#define HCR_BSU (3 << 10)
42#define HCR_BSU_IS (1 << 10)
43#define HCR_FB (1 << 9)
44#define HCR_VA (1 << 8)
45#define HCR_VI (1 << 7)
46#define HCR_VF (1 << 6)
47#define HCR_AMO (1 << 5)
48#define HCR_IMO (1 << 4)
49#define HCR_FMO (1 << 3)
50#define HCR_PTW (1 << 2)
51#define HCR_SWIO (1 << 1)
52#define HCR_VM 1
53
54/*
55 * The bits we set in HCR:
56 * TAC: Trap ACTLR
57 * TSC: Trap SMC
58 * TSW: Trap cache operations by set/way
59 * TWI: Trap WFI
60 * TIDCP: Trap L2CTLR/L2ECTLR
61 * BSU_IS: Upgrade barriers to the inner shareable domain
62 * FB: Force broadcast of all maintenance operations
63 * AMO: Override CPSR.A and enable signaling with VA
64 * IMO: Override CPSR.I and enable signaling with VI
65 * FMO: Override CPSR.F and enable signaling with VF
66 * SWIO: Turn set/way invalidates into set/way clean+invalidate
67 */
68#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
69 HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
70 HCR_SWIO | HCR_TIDCP)
71#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
72
73/* System Control Register (SCTLR) bits */
74#define SCTLR_TE (1 << 30)
75#define SCTLR_EE (1 << 25)
76#define SCTLR_V (1 << 13)
77
78/* Hyp System Control Register (HSCTLR) bits */
79#define HSCTLR_TE (1 << 30)
80#define HSCTLR_EE (1 << 25)
81#define HSCTLR_FI (1 << 21)
82#define HSCTLR_WXN (1 << 19)
83#define HSCTLR_I (1 << 12)
84#define HSCTLR_C (1 << 2)
85#define HSCTLR_A (1 << 1)
86#define HSCTLR_M 1
87#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
88 HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
89
90/* TTBCR and HTCR Registers bits */
91#define TTBCR_EAE (1 << 31)
92#define TTBCR_IMP (1 << 30)
93#define TTBCR_SH1 (3 << 28)
94#define TTBCR_ORGN1 (3 << 26)
95#define TTBCR_IRGN1 (3 << 24)
96#define TTBCR_EPD1 (1 << 23)
97#define TTBCR_A1 (1 << 22)
98#define TTBCR_T1SZ (3 << 16)
99#define TTBCR_SH0 (3 << 12)
100#define TTBCR_ORGN0 (3 << 10)
101#define TTBCR_IRGN0 (3 << 8)
102#define TTBCR_EPD0 (1 << 7)
103#define TTBCR_T0SZ 3
104#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
105
106/* Hyp System Trap Register */
107#define HSTR_T(x) (1 << x)
108#define HSTR_TTEE (1 << 16)
109#define HSTR_TJDBX (1 << 17)
110
111/* Hyp Coprocessor Trap Register */
112#define HCPTR_TCP(x) (1 << x)
113#define HCPTR_TCP_MASK (0x3fff)
114#define HCPTR_TASE (1 << 15)
115#define HCPTR_TTA (1 << 20)
116#define HCPTR_TCPAC (1 << 31)
117
118/* Hyp Debug Configuration Register bits */
119#define HDCR_TDRA (1 << 11)
120#define HDCR_TDOSA (1 << 10)
121#define HDCR_TDA (1 << 9)
122#define HDCR_TDE (1 << 8)
123#define HDCR_HPME (1 << 7)
124#define HDCR_TPM (1 << 6)
125#define HDCR_TPMCR (1 << 5)
126#define HDCR_HPMN_MASK (0x1F)
127
128/*
129 * The architecture supports 40-bit IPA as input to the 2nd stage translations
130 * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
131 * space.
132 */
133#define KVM_PHYS_SHIFT (40)
134#define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT)
135#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL)
136#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30))
137#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
138#define S2_PGD_SIZE (1 << S2_PGD_ORDER)
139
140/* Virtualization Translation Control Register (VTCR) bits */
141#define VTCR_SH0 (3 << 12)
142#define VTCR_ORGN0 (3 << 10)
143#define VTCR_IRGN0 (3 << 8)
144#define VTCR_SL0 (3 << 6)
145#define VTCR_S (1 << 4)
146#define VTCR_T0SZ (0xf)
147#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
148 VTCR_S | VTCR_T0SZ)
149#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
150#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
151#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
152#define KVM_VTCR_SL0 VTCR_SL_L1
153/* stage-2 input address range defined as 2^(32-T0SZ) */
154#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
155#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
156#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
157
158/* Virtualization Translation Table Base Register (VTTBR) bits */
159#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
160#define VTTBR_X (14 - KVM_T0SZ)
161#else
162#define VTTBR_X (5 - KVM_T0SZ)
163#endif
164#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
165#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
166#define VTTBR_VMID_SHIFT (48LLU)
167#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
168
169/* Hyp Syndrome Register (HSR) bits */
170#define HSR_EC_SHIFT (26)
171#define HSR_EC (0x3fU << HSR_EC_SHIFT)
172#define HSR_IL (1U << 25)
173#define HSR_ISS (HSR_IL - 1)
174#define HSR_ISV_SHIFT (24)
175#define HSR_ISV (1U << HSR_ISV_SHIFT)
176#define HSR_SRT_SHIFT (16)
177#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
178#define HSR_FSC (0x3f)
179#define HSR_FSC_TYPE (0x3c)
180#define HSR_SSE (1 << 21)
181#define HSR_WNR (1 << 6)
182#define HSR_CV_SHIFT (24)
183#define HSR_CV (1U << HSR_CV_SHIFT)
184#define HSR_COND_SHIFT (20)
185#define HSR_COND (0xfU << HSR_COND_SHIFT)
186
187#define FSC_FAULT (0x04)
188#define FSC_PERM (0x0c)
189
190/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
191#define HPFAR_MASK (~0xf)
192
193#define HSR_EC_UNKNOWN (0x00)
194#define HSR_EC_WFI (0x01)
195#define HSR_EC_CP15_32 (0x03)
196#define HSR_EC_CP15_64 (0x04)
197#define HSR_EC_CP14_MR (0x05)
198#define HSR_EC_CP14_LS (0x06)
199#define HSR_EC_CP_0_13 (0x07)
200#define HSR_EC_CP10_ID (0x08)
201#define HSR_EC_JAZELLE (0x09)
202#define HSR_EC_BXJ (0x0A)
203#define HSR_EC_CP14_64 (0x0C)
204#define HSR_EC_SVC_HYP (0x11)
205#define HSR_EC_HVC (0x12)
206#define HSR_EC_SMC (0x13)
207#define HSR_EC_IABT (0x20)
208#define HSR_EC_IABT_HYP (0x21)
209#define HSR_EC_DABT (0x24)
210#define HSR_EC_DABT_HYP (0x25)
211
212#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
213
214#endif /* __ARM_KVM_ARM_H__ */
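The stage-2 sizing above is easiest to check by hand; the following is editorial arithmetic, not part of the patch:

    /*
     * KVM_PHYS_SHIFT   = 40                -> 1TB of guest IPA space
     * PTRS_PER_S2_PGD  = 1 << (40 - 30)    = 1024 entries, 1GB each
     * S2 pgd size      = 1024 * 8 bytes    = 8KB (64-bit LPAE entries)
     * KVM_T0SZ         = 32 - 40           = -8, i.e. 2^(32 - T0SZ) = 2^40
     * VTTBR_X          = 5 - KVM_T0SZ      = 13  (KVM_VTCR_SL0 == VTCR_SL_L1)
     *
     * So the stage-2 pgd programmed into VTTBR must be 2^13 = 8KB aligned,
     * which matches the natural size of the table computed above.
     */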
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
new file mode 100644
index 000000000000..5e06e8177784
--- /dev/null
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -0,0 +1,82 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_ASM_H__
20#define __ARM_KVM_ASM_H__
21
22/* 0 is reserved as an invalid value. */
23#define c0_MPIDR 1 /* MultiProcessor ID Register */
24#define c0_CSSELR 2 /* Cache Size Selection Register */
25#define c1_SCTLR 3 /* System Control Register */
26#define c1_ACTLR 4 /* Auxiliary Control Register */
27#define c1_CPACR 5 /* Coprocessor Access Control */
28#define c2_TTBR0 6 /* Translation Table Base Register 0 */
29#define c2_TTBR0_high 7 /* TTBR0 top 32 bits */
30#define c2_TTBR1 8 /* Translation Table Base Register 1 */
31#define c2_TTBR1_high 9 /* TTBR1 top 32 bits */
32#define c2_TTBCR 10 /* Translation Table Base Control R. */
33#define c3_DACR 11 /* Domain Access Control Register */
34#define c5_DFSR 12 /* Data Fault Status Register */
35#define c5_IFSR 13 /* Instruction Fault Status Register */
36#define c5_ADFSR 14 /* Auxiliary Data Fault Status R */
37#define c5_AIFSR 15 /* Auxiliary Instruction Fault Status R */
38#define c6_DFAR 16 /* Data Fault Address Register */
39#define c6_IFAR 17 /* Instruction Fault Address Register */
40#define c9_L2CTLR 18 /* Cortex A15 L2 Control Register */
41#define c10_PRRR 19 /* Primary Region Remap Register */
42#define c10_NMRR 20 /* Normal Memory Remap Register */
43#define c12_VBAR 21 /* Vector Base Address Register */
44#define c13_CID 22 /* Context ID Register */
45#define c13_TID_URW 23 /* Thread ID, User R/W */
46#define c13_TID_URO 24 /* Thread ID, User R/O */
47#define c13_TID_PRIV 25 /* Thread ID, Privileged */
48#define NR_CP15_REGS 26 /* Number of regs (incl. invalid) */
49
50#define ARM_EXCEPTION_RESET 0
51#define ARM_EXCEPTION_UNDEFINED 1
52#define ARM_EXCEPTION_SOFTWARE 2
53#define ARM_EXCEPTION_PREF_ABORT 3
54#define ARM_EXCEPTION_DATA_ABORT 4
55#define ARM_EXCEPTION_IRQ 5
56#define ARM_EXCEPTION_FIQ 6
57#define ARM_EXCEPTION_HVC 7
58
59#ifndef __ASSEMBLY__
60struct kvm;
61struct kvm_vcpu;
62
63extern char __kvm_hyp_init[];
64extern char __kvm_hyp_init_end[];
65
66extern char __kvm_hyp_exit[];
67extern char __kvm_hyp_exit_end[];
68
69extern char __kvm_hyp_vector[];
70
71extern char __kvm_hyp_code_start[];
72extern char __kvm_hyp_code_end[];
73
74extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
75
76extern void __kvm_flush_vm_context(void);
77extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
78
79extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
80#endif
81
82#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
new file mode 100644
index 000000000000..4917c2f7e459
--- /dev/null
+++ b/arch/arm/include/asm/kvm_coproc.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2012 Rusty Russell IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2, as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
16 */
17
18#ifndef __ARM_KVM_COPROC_H__
19#define __ARM_KVM_COPROC_H__
20#include <linux/kvm_host.h>
21
22void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
23
24struct kvm_coproc_target_table {
25 unsigned target;
26 const struct coproc_reg *table;
27 size_t num;
28};
29void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
30
31int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
32int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
33int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
34int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
35int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
36int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
37
38unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
39int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
40void kvm_coproc_table_init(void);
41
42struct kvm_one_reg;
43int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
44int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
45int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
46unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
47#endif /* __ARM_KVM_COPROC_H__ */
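Per-core implementations plug their register tables in through kvm_register_target_coproc_table(). A sketch of the expected registration, modelled loosely on the Cortex-A15 support in this series (the a15_regs contents and the initcall choice are assumptions, not taken from the patch):

    static const struct coproc_reg a15_regs[] = {
    	/* target-specific cp15 register descriptions would go here */
    };

    static struct kvm_coproc_target_table a15_target_table = {
    	.target	= KVM_ARM_TARGET_CORTEX_A15,
    	.table	= a15_regs,
    	.num	= ARRAY_SIZE(a15_regs),
    };

    static int __init coproc_a15_init(void)
    {
    	kvm_register_target_coproc_table(&a15_target_table);
    	return 0;
    }
    late_initcall(coproc_a15_init);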
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
new file mode 100644
index 000000000000..fd611996bfb5
--- /dev/null
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_EMULATE_H__
20#define __ARM_KVM_EMULATE_H__
21
22#include <linux/kvm_host.h>
23#include <asm/kvm_asm.h>
24#include <asm/kvm_mmio.h>
25
26u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
27u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
28
29int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
30void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
31void kvm_inject_undefined(struct kvm_vcpu *vcpu);
32void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
33void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
34
35static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
36{
37 return 1;
38}
39
40static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
41{
42 return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
43}
44
45static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
46{
47 return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
48}
49
50static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
51{
52 *vcpu_cpsr(vcpu) |= PSR_T_BIT;
53}
54
55static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
56{
57 unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
58 return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
59}
60
61static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
62{
63 unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
64 return cpsr_mode > USR_MODE;
65}
66
67static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
68{
69 return reg == 15;
70}
71
72#endif /* __ARM_KVM_EMULATE_H__ */
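These accessors give the rest of KVM a single way to touch guest context. A hedged example (the function is hypothetical): completing an emulated load means writing into the saved register file, not a live CPU register:

    /* Write an emulated load result back into the guest's register file. */
    static void complete_emulated_load(struct kvm_vcpu *vcpu, u8 rt, u32 data)
    {
    	if (!kvm_vcpu_reg_is_pc(vcpu, rt))
    		*vcpu_reg(vcpu, rt) = data;
    }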
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
new file mode 100644
index 000000000000..98b4d1a72923
--- /dev/null
+++ b/arch/arm/include/asm/kvm_host.h
@@ -0,0 +1,161 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_HOST_H__
20#define __ARM_KVM_HOST_H__
21
22#include <asm/kvm.h>
23#include <asm/kvm_asm.h>
24#include <asm/kvm_mmio.h>
25#include <asm/fpstate.h>
26
27#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
28#define KVM_MEMORY_SLOTS 32
29#define KVM_PRIVATE_MEM_SLOTS 4
30#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
31#define KVM_HAVE_ONE_REG
32
33#define KVM_VCPU_MAX_FEATURES 1
34
35/* We don't currently support large pages. */
36#define KVM_HPAGE_GFN_SHIFT(x) 0
37#define KVM_NR_PAGE_SIZES 1
38#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
39
40struct kvm_vcpu;
41u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
42int kvm_target_cpu(void);
43int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
44void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
45
46struct kvm_arch {
47 /* VTTBR value associated with below pgd and vmid */
48 u64 vttbr;
49
50 /*
51 * Anything that is not used directly from assembly code goes
52 * here.
53 */
54
55 /* The VMID generation used for the virt. memory system */
56 u64 vmid_gen;
57 u32 vmid;
58
59 /* Stage-2 page table */
60 pgd_t *pgd;
61};
62
63#define KVM_NR_MEM_OBJS 40
64
65/*
66 * We don't want allocation failures within the mmu code, so we preallocate
67 * enough memory for a single page fault in a cache.
68 */
69struct kvm_mmu_memory_cache {
70 int nobjs;
71 void *objects[KVM_NR_MEM_OBJS];
72};
73
74struct kvm_vcpu_arch {
75 struct kvm_regs regs;
76
77 int target; /* Processor target */
78 DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
79
80 /* System control coprocessor (cp15) */
81 u32 cp15[NR_CP15_REGS];
82
83 /* The CPU type we expose to the VM */
84 u32 midr;
85
86 /* Exception Information */
87 u32 hsr; /* Hyp Syndrome Register */
88 u32 hxfar; /* Hyp Data/Inst Fault Address Register */
89 u32 hpfar; /* Hyp IPA Fault Address Register */
90
91 /* Floating point registers (VFP and Advanced SIMD/NEON) */
92 struct vfp_hard_struct vfp_guest;
93 struct vfp_hard_struct *vfp_host;
94
95 /*
96 * Anything that is not used directly from assembly code goes
97 * here.
98 */
99 /* dcache set/way operation pending */
100 int last_pcpu;
101 cpumask_t require_dcache_flush;
102
103 /* Don't run the guest on this vcpu */
104 bool pause;
105
106 /* IO related fields */
107 struct kvm_decode mmio_decode;
108
109 /* Interrupt related fields */
110 u32 irq_lines; /* IRQ and FIQ levels */
111
112 /* Hyp exception information */
113 u32 hyp_pc; /* PC when exception was taken from Hyp mode */
114
115 /* Cache some mmu pages needed inside spinlock regions */
116 struct kvm_mmu_memory_cache mmu_page_cache;
117
118 /* Detect first run of a vcpu */
119 bool has_run_once;
120};
121
122struct kvm_vm_stat {
123 u32 remote_tlb_flush;
124};
125
126struct kvm_vcpu_stat {
127 u32 halt_wakeup;
128};
129
130struct kvm_vcpu_init;
131int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
132 const struct kvm_vcpu_init *init);
133unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
134int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
135struct kvm_one_reg;
136int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
137int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
138u64 kvm_call_hyp(void *hypfn, ...);
139void force_vm_exit(const cpumask_t *mask);
140
141#define KVM_ARCH_WANT_MMU_NOTIFIER
142struct kvm;
143int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
144int kvm_unmap_hva_range(struct kvm *kvm,
145 unsigned long start, unsigned long end);
146void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
147
148unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
149int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
150
151/* We do not have shadow page tables, hence the empty hooks */
152static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
153{
154 return 0;
155}
156
157static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
158{
159 return 0;
160}
161#endif /* __ARM_KVM_HOST_H__ */
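kvm_arch.vttbr pairs the stage-2 pgd with the VMID so the hardware can tag TLB entries per-VM. A minimal sketch of composing that value from the kvm_arm.h masks (the in-tree update logic lives in arch/arm/kvm/arm.c; this helper is hypothetical):

    #include <asm/kvm_arm.h>

    static void compose_vttbr(struct kvm *kvm, phys_addr_t pgd_phys)
    {
    	u64 vmid = ((u64)kvm->arch.vmid << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;

    	kvm->arch.vttbr = (pgd_phys & VTTBR_BADDR_MASK) | vmid;
    }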
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
new file mode 100644
index 000000000000..adcc0d7d3175
--- /dev/null
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_MMIO_H__
20#define __ARM_KVM_MMIO_H__
21
22#include <linux/kvm_host.h>
23#include <asm/kvm_asm.h>
24#include <asm/kvm_arm.h>
25
26struct kvm_decode {
27 unsigned long rt;
28 bool sign_extend;
29};
30
31/*
32 * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
33 * which is an anonymous type. Use our own type instead.
34 */
35struct kvm_exit_mmio {
36 phys_addr_t phys_addr;
37 u8 data[8];
38 u32 len;
39 bool is_write;
40};
41
42static inline void kvm_prepare_mmio(struct kvm_run *run,
43 struct kvm_exit_mmio *mmio)
44{
45 run->mmio.phys_addr = mmio->phys_addr;
46 run->mmio.len = mmio->len;
47 run->mmio.is_write = mmio->is_write;
48 memcpy(run->mmio.data, mmio->data, mmio->len);
49 run->exit_reason = KVM_EXIT_MMIO;
50}
51
52int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
53int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
54 phys_addr_t fault_ipa);
55
56#endif /* __ARM_KVM_MMIO_H__ */
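The split between kvm_exit_mmio and run->mmio keeps the emulation code off the anonymous UAPI type until the last moment. A hedged sketch of the intended flow (the real decode lives in arch/arm/kvm/mmio.c; this function is illustrative):

    /* Report a guest MMIO read of 'len' bytes at 'fault_ipa' to userspace. */
    static int report_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run,
    			    phys_addr_t fault_ipa, u32 len)
    {
    	struct kvm_exit_mmio mmio = {
    		.phys_addr	= fault_ipa,
    		.len		= len,
    		.is_write	= false,
    	};

    	kvm_prepare_mmio(run, &mmio);	/* fills run->mmio, sets exit reason */
    	return 0;			/* 0: exit to userspace for emulation */
    }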
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
new file mode 100644
index 000000000000..421a20b34874
--- /dev/null
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_MMU_H__
20#define __ARM_KVM_MMU_H__
21
22int create_hyp_mappings(void *from, void *to);
23int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
24void free_hyp_pmds(void);
25
26int kvm_alloc_stage2_pgd(struct kvm *kvm);
27void kvm_free_stage2_pgd(struct kvm *kvm);
28int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
29 phys_addr_t pa, unsigned long size);
30
31int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
32
33void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
34
35phys_addr_t kvm_mmu_get_httbr(void);
36int kvm_mmu_init(void);
37void kvm_clear_hyp_idmap(void);
38
39static inline bool kvm_is_write_fault(unsigned long hsr)
40{
41 unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
42 if (hsr_ec == HSR_EC_IABT)
43 return false;
44 else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
45 return false;
46 else
47 return true;
48}
49
50#endif /* __ARM_KVM_MMU_H__ */
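kvm_is_write_fault() errs on the side of writes: only instruction aborts and decoded loads count as reads. An editorial truth table, not part of the patch:

    /*
     * EC == HSR_EC_IABT (0x20)          -> false (instruction fetch)
     * EC == DABT, ISV == 1, WNR == 0    -> false (decoded load)
     * EC == DABT, ISV == 1, WNR == 1    -> true  (decoded store)
     * EC == DABT, ISV == 0              -> true  (undecoded, assume write)
     */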
diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h
new file mode 100644
index 000000000000..9a83d98bf170
--- /dev/null
+++ b/arch/arm/include/asm/kvm_psci.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2012 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM_KVM_PSCI_H__
19#define __ARM_KVM_PSCI_H__
20
21bool kvm_psci_call(struct kvm_vcpu *vcpu);
22
23#endif /* __ARM_KVM_PSCI_H__ */
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index db9fedb57f2c..5cf2e979b4be 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -23,6 +23,7 @@ struct hw_pci {
23#endif 23#endif
24 struct pci_ops *ops; 24 struct pci_ops *ops;
25 int nr_controllers; 25 int nr_controllers;
26 void **private_data;
26 int (*setup)(int nr, struct pci_sys_data *); 27 int (*setup)(int nr, struct pci_sys_data *);
27 struct pci_bus *(*scan)(int nr, struct pci_sys_data *); 28 struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
28 void (*preinit)(void); 29 void (*preinit)(void);
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 73cf03aa981e..64c770d24198 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -36,23 +36,23 @@
36 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area 36 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
37 */ 37 */
38#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) 38#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
39#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) 39#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
40#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3) 40#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
41 41
42/* 42/*
43 * The maximum size of a 26-bit user space task. 43 * The maximum size of a 26-bit user space task.
44 */ 44 */
45#define TASK_SIZE_26 UL(0x04000000) 45#define TASK_SIZE_26 (UL(1) << 26)
46 46
47/* 47/*
48 * The module space lives between the addresses given by TASK_SIZE 48 * The module space lives between the addresses given by TASK_SIZE
49 * and PAGE_OFFSET - it must be within 32MB of the kernel text. 49 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
50 */ 50 */
51#ifndef CONFIG_THUMB2_KERNEL 51#ifndef CONFIG_THUMB2_KERNEL
52#define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024) 52#define MODULES_VADDR (PAGE_OFFSET - SZ_16M)
53#else 53#else
54/* smaller range for Thumb-2 symbol relocation (2^24) */ 54/* smaller range for Thumb-2 symbol relocation (2^24) */
55#define MODULES_VADDR (PAGE_OFFSET - 8*1024*1024) 55#define MODULES_VADDR (PAGE_OFFSET - SZ_8M)
56#endif 56#endif
57 57
58#if TASK_SIZE > MODULES_VADDR 58#if TASK_SIZE > MODULES_VADDR
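For the common 3G/1G split the new definitions are value-preserving; editorial arithmetic with PAGE_OFFSET = 0xC0000000, shown purely as an example:

    /*
     * TASK_SIZE          = 0xC0000000 - SZ_16M = 0xBF000000 (unchanged)
     * TASK_UNMAPPED_BASE = ALIGN(0xBF000000 / 3, SZ_16M)
     *                    = ALIGN(0x3FAAAAAA, 0x01000000)
     *                    = 0x40000000 (same as 0xC0000000 / 3 before)
     *
     * The ALIGN() matters for page offsets where TASK_SIZE / 3 would not
     * land on a 16MB boundary by itself.
     */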
diff --git a/arch/arm/include/asm/opcodes-sec.h b/arch/arm/include/asm/opcodes-sec.h
new file mode 100644
index 000000000000..bc3a9174417c
--- /dev/null
+++ b/arch/arm/include/asm/opcodes-sec.h
@@ -0,0 +1,24 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2012 ARM Limited
12 */
13
14#ifndef __ASM_ARM_OPCODES_SEC_H
15#define __ASM_ARM_OPCODES_SEC_H
16
17#include <asm/opcodes.h>
18
19#define __SMC(imm4) __inst_arm_thumb32( \
20 0xE1600070 | (((imm4) & 0xF) << 0), \
21 0xF7F08000 | (((imm4) & 0xF) << 16) \
22)
23
24#endif /* __ASM_ARM_OPCODES_SEC_H */
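__inst_arm_thumb32() selects whichever encoding matches the kernel's instruction set, so __SMC() is usable from both ARM and Thumb-2 kernels. Editorial expansion for the immediate 0:

    /*
     * __SMC(0):
     *   ARM:     0xE1600070 | (0 << 0)  = 0xE1600070  (SMC #0)
     *   Thumb-2: 0xF7F08000 | (0 << 16) = 0xF7F08000  (SMC #0)
     */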
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
index 74e211a6fb24..e796c598513b 100644
--- a/arch/arm/include/asm/opcodes.h
+++ b/arch/arm/include/asm/opcodes.h
@@ -10,6 +10,7 @@
10#define __ASM_ARM_OPCODES_H 10#define __ASM_ARM_OPCODES_H
11 11
12#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
13#include <linux/linkage.h>
13extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); 14extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
14#endif 15#endif
15 16
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 53426c66352a..12f71a190422 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -92,6 +92,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
92static inline void outer_flush_all(void) { } 92static inline void outer_flush_all(void) { }
93static inline void outer_inv_all(void) { } 93static inline void outer_inv_all(void) { }
94static inline void outer_disable(void) { } 94static inline void outer_disable(void) { }
95static inline void outer_resume(void) { }
95 96
96#endif 97#endif
97 98
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index d7952824c5c4..18f5cef82ad5 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -32,6 +32,9 @@
32#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) 32#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
33#define PMD_BIT4 (_AT(pmdval_t, 0)) 33#define PMD_BIT4 (_AT(pmdval_t, 0))
34#define PMD_DOMAIN(x) (_AT(pmdval_t, 0)) 34#define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
35#define PMD_APTABLE_SHIFT (61)
36#define PMD_APTABLE (_AT(pgdval_t, 3) << PMD_APTABLE_SHIFT)
37#define PMD_PXNTABLE (_AT(pgdval_t, 1) << 59)
35 38
36/* 39/*
37 * - section 40 * - section
@@ -41,9 +44,11 @@
41#define PMD_SECT_S (_AT(pmdval_t, 3) << 8) 44#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
42#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) 45#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
43#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11) 46#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
47#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
44#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54) 48#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
45#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0)) 49#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
46#define PMD_SECT_AP_READ (_AT(pmdval_t, 0)) 50#define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
51#define PMD_SECT_AP1 (_AT(pmdval_t, 1) << 6)
47#define PMD_SECT_TEX(x) (_AT(pmdval_t, 0)) 52#define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
48 53
49/* 54/*
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index a3f37929940a..6ef8afd1b64c 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -104,11 +104,29 @@
104 */ 104 */
105#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */ 105#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */
106 106
107/*
108 * 2nd stage PTE definitions for LPAE.
109 */
110#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
111#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
112#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
113#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
114#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */
115
116/*
117 * Hyp-mode PL2 PTE definitions for LPAE.
118 */
119#define L_PTE_HYP L_PTE_USER
120
107#ifndef __ASSEMBLY__ 121#ifndef __ASSEMBLY__
108 122
109#define pud_none(pud) (!pud_val(pud)) 123#define pud_none(pud) (!pud_val(pud))
110#define pud_bad(pud) (!(pud_val(pud) & 2)) 124#define pud_bad(pud) (!(pud_val(pud) & 2))
111#define pud_present(pud) (pud_val(pud)) 125#define pud_present(pud) (pud_val(pud))
126#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
127 PMD_TYPE_TABLE)
128#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
129 PMD_TYPE_SECT)
112 130
113#define pud_clear(pudp) \ 131#define pud_clear(pudp) \
114 do { \ 132 do { \
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9c82f988c0e3..f30ac3b55ba9 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -70,6 +70,9 @@ extern void __pgd_error(const char *file, int line, pgd_t);
70 70
71extern pgprot_t pgprot_user; 71extern pgprot_t pgprot_user;
72extern pgprot_t pgprot_kernel; 72extern pgprot_t pgprot_kernel;
73extern pgprot_t pgprot_hyp_device;
74extern pgprot_t pgprot_s2;
75extern pgprot_t pgprot_s2_device;
73 76
74#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) 77#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
75 78
@@ -82,6 +85,10 @@ extern pgprot_t pgprot_kernel;
82#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) 85#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
83#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) 86#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
84#define PAGE_KERNEL_EXEC pgprot_kernel 87#define PAGE_KERNEL_EXEC pgprot_kernel
88#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP)
89#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
90#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
91#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)
85 92
86#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) 93#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
87#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) 94#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
new file mode 100644
index 000000000000..ce0dbe7c1625
--- /dev/null
+++ b/arch/arm/include/asm/psci.h
@@ -0,0 +1,36 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2012 ARM Limited
12 */
13
14#ifndef __ASM_ARM_PSCI_H
15#define __ASM_ARM_PSCI_H
16
17#define PSCI_POWER_STATE_TYPE_STANDBY 0
18#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
19
20struct psci_power_state {
21 u16 id;
22 u8 type;
23 u8 affinity_level;
24};
25
26struct psci_operations {
27 int (*cpu_suspend)(struct psci_power_state state,
28 unsigned long entry_point);
29 int (*cpu_off)(struct psci_power_state state);
30 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
31 int (*migrate)(unsigned long cpuid);
32};
33
34extern struct psci_operations psci_ops;
35
36#endif /* __ASM_ARM_PSCI_H */
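psci_ops gives platform code a uniform front end to firmware power management. A hedged sketch of bringing up a secondary core through it (the helper name and error policy are hypothetical):

    #include <linux/errno.h>
    #include <asm/psci.h>

    static int boot_secondary_via_psci(unsigned long cpu, unsigned long entry)
    {
    	if (psci_ops.cpu_on)
    		return psci_ops.cpu_on(cpu, entry);	/* firmware starts the core */
    	return -ENODEV;					/* no PSCI firmware bound */
    }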
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 4eb6d005ffaa..86dff32a0737 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -7,8 +7,14 @@
7 7
8#ifndef __ASSEMBLER__ 8#ifndef __ASSEMBLER__
9unsigned int scu_get_core_count(void __iomem *); 9unsigned int scu_get_core_count(void __iomem *);
10void scu_enable(void __iomem *);
11int scu_power_mode(void __iomem *, unsigned int); 10int scu_power_mode(void __iomem *, unsigned int);
11
12#ifdef CONFIG_SMP
13void scu_enable(void __iomem *scu_base);
14#else
15static inline void scu_enable(void __iomem *scu_base) {}
16#endif
17
12#endif 18#endif
13 19
14#endif 20#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index b4ca707d0a69..6220e9fdf4c7 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
119 119
120static inline void arch_spin_unlock(arch_spinlock_t *lock) 120static inline void arch_spin_unlock(arch_spinlock_t *lock)
121{ 121{
122 unsigned long tmp;
123 u32 slock;
124
125 smp_mb(); 122 smp_mb();
126 123 lock->tickets.owner++;
127 __asm__ __volatile__(
128" mov %1, #1\n"
129"1: ldrex %0, [%2]\n"
130" uadd16 %0, %0, %1\n"
131" strex %1, %0, [%2]\n"
132" teq %1, #0\n"
133" bne 1b"
134 : "=&r" (slock), "=&r" (tmp)
135 : "r" (&lock->slock)
136 : "cc");
137
138 dsb_sev(); 124 dsb_sev();
139} 125}
140 126
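With ticket locks, release is inherently simple: only the current holder ever writes the owner field, so a plain increment after the barrier is enough, and the old exclusive-monitor loop was doing needless work. A sketch of the layout this relies on, matching the arch_spinlock_t already in this tree:

    typedef struct {
    	union {
    		u32 slock;
    		struct __raw_tickets {
    #ifdef __ARMEB__
    			u16 next;	/* next ticket to hand out */
    			u16 owner;	/* ticket currently served */
    #else
    			u16 owner;
    			u16 next;
    #endif
    		} tickets;
    	};
    } arch_spinlock_t;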
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 86164df86cb4..50af92bac737 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -24,9 +24,9 @@
24/* 24/*
25 * Flag indicating that the kernel was not entered in the same mode on every 25 * Flag indicating that the kernel was not entered in the same mode on every
26 * CPU. The zImage loader stashes this value in an SPSR, so we need an 26 * CPU. The zImage loader stashes this value in an SPSR, so we need an
27 * architecturally defined flag bit here (the N flag, as it happens) 27 * architecturally defined flag bit here.
28 */ 28 */
29#define BOOT_CPU_MODE_MISMATCH (1<<31) 29#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
30 30
31#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
32 32
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
new file mode 100644
index 000000000000..3303ff5adbf3
--- /dev/null
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -0,0 +1,164 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_H__
20#define __ARM_KVM_H__
21
22#include <linux/types.h>
23#include <asm/ptrace.h>
24
25#define __KVM_HAVE_GUEST_DEBUG
26#define __KVM_HAVE_IRQ_LINE
27
28#define KVM_REG_SIZE(id) \
29 (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
30
31/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */
32#define KVM_ARM_SVC_sp svc_regs[0]
33#define KVM_ARM_SVC_lr svc_regs[1]
34#define KVM_ARM_SVC_spsr svc_regs[2]
35#define KVM_ARM_ABT_sp abt_regs[0]
36#define KVM_ARM_ABT_lr abt_regs[1]
37#define KVM_ARM_ABT_spsr abt_regs[2]
38#define KVM_ARM_UND_sp und_regs[0]
39#define KVM_ARM_UND_lr und_regs[1]
40#define KVM_ARM_UND_spsr und_regs[2]
41#define KVM_ARM_IRQ_sp irq_regs[0]
42#define KVM_ARM_IRQ_lr irq_regs[1]
43#define KVM_ARM_IRQ_spsr irq_regs[2]
44
45/* Valid only for fiq_regs in struct kvm_regs */
46#define KVM_ARM_FIQ_r8 fiq_regs[0]
47#define KVM_ARM_FIQ_r9 fiq_regs[1]
48#define KVM_ARM_FIQ_r10 fiq_regs[2]
49#define KVM_ARM_FIQ_fp fiq_regs[3]
50#define KVM_ARM_FIQ_ip fiq_regs[4]
51#define KVM_ARM_FIQ_sp fiq_regs[5]
52#define KVM_ARM_FIQ_lr fiq_regs[6]
53#define KVM_ARM_FIQ_spsr fiq_regs[7]
54
55struct kvm_regs {
56 struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */
57 __u32 svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
58 __u32 abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
59 __u32 und_regs[3]; /* SP_und, LR_und, SPSR_und */
60 __u32 irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
61 __u32 fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
62};
63
64/* Supported Processor Types */
65#define KVM_ARM_TARGET_CORTEX_A15 0
66#define KVM_ARM_NUM_TARGETS 1
67
68#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
69
70struct kvm_vcpu_init {
71 __u32 target;
72 __u32 features[7];
73};
74
75struct kvm_sregs {
76};
77
78struct kvm_fpu {
79};
80
81struct kvm_guest_debug_arch {
82};
83
84struct kvm_debug_exit_arch {
85};
86
87struct kvm_sync_regs {
88};
89
90struct kvm_arch_memory_slot {
91};
92
93/* If you need to interpret the index values, here is the key: */
94#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
95#define KVM_REG_ARM_COPROC_SHIFT 16
96#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007
97#define KVM_REG_ARM_32_OPC2_SHIFT 0
98#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078
99#define KVM_REG_ARM_OPC1_SHIFT 3
100#define KVM_REG_ARM_CRM_MASK 0x0000000000000780
101#define KVM_REG_ARM_CRM_SHIFT 7
102#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800
103#define KVM_REG_ARM_32_CRN_SHIFT 11
104
105/* Normal registers are mapped as coprocessor 16. */
106#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
107#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4)
108
109/* Some registers need more space to represent values. */
110#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
111#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00
112#define KVM_REG_ARM_DEMUX_ID_SHIFT 8
113#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
114#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF
115#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0
116
117/* VFP registers: we could overload CP10 like ARM does, but that's ugly. */
118#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT)
119#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF
120#define KVM_REG_ARM_VFP_BASE_REG 0x0
121#define KVM_REG_ARM_VFP_FPSID 0x1000
122#define KVM_REG_ARM_VFP_FPSCR 0x1001
123#define KVM_REG_ARM_VFP_MVFR1 0x1006
124#define KVM_REG_ARM_VFP_MVFR0 0x1007
125#define KVM_REG_ARM_VFP_FPEXC 0x1008
126#define KVM_REG_ARM_VFP_FPINST 0x1009
127#define KVM_REG_ARM_VFP_FPINST2 0x100A
128
129
130/* KVM_IRQ_LINE irq field index values */
131#define KVM_ARM_IRQ_TYPE_SHIFT 24
132#define KVM_ARM_IRQ_TYPE_MASK 0xff
133#define KVM_ARM_IRQ_VCPU_SHIFT 16
134#define KVM_ARM_IRQ_VCPU_MASK 0xff
135#define KVM_ARM_IRQ_NUM_SHIFT 0
136#define KVM_ARM_IRQ_NUM_MASK 0xffff
137
138/* irq_type field */
139#define KVM_ARM_IRQ_TYPE_CPU 0
140#define KVM_ARM_IRQ_TYPE_SPI 1
141#define KVM_ARM_IRQ_TYPE_PPI 2
142
143/* out-of-kernel GIC cpu interrupt injection irq_number field */
144#define KVM_ARM_IRQ_CPU_IRQ 0
145#define KVM_ARM_IRQ_CPU_FIQ 1
146
147/* Highest supported SPI, from VGIC_NR_IRQS */
148#define KVM_ARM_IRQ_GIC_MAX 127
149
150/* PSCI interface */
151#define KVM_PSCI_FN_BASE 0x95c1ba5e
152#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
153
154#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0)
155#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1)
156#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
157#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
158
159#define KVM_PSCI_RET_SUCCESS 0
160#define KVM_PSCI_RET_NI ((unsigned long)-1)
161#define KVM_PSCI_RET_INVAL ((unsigned long)-2)
162#define KVM_PSCI_RET_DENIED ((unsigned long)-3)
163
164#endif /* __ARM_KVM_H__ */
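Userspace addresses individual guest registers by composing a 64-bit id from these fields. A hedged example building the id for the guest PC (KVM_REG_ARM and KVM_REG_SIZE_U32 come from the generic <linux/kvm.h>; the helper itself is hypothetical):

    #include <stddef.h>
    #include <linux/kvm.h>

    static __u64 kvm_arm_core_reg_pc(void)
    {
    	return KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE |
    	       KVM_REG_ARM_CORE_REG(usr_regs.ARM_pc);
    }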
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5bbec7b8183e..5f3338eacad2 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -82,5 +82,6 @@ obj-$(CONFIG_DEBUG_LL) += debug.o
82obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 82obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
83 83
84obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o 84obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
85obj-$(CONFIG_ARM_PSCI) += psci.o
85 86
86extra-y := $(head-y) vmlinux.lds 87extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index c985b481192c..c8b3272dfed1 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -13,6 +13,9 @@
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/dma-mapping.h> 15#include <linux/dma-mapping.h>
16#ifdef CONFIG_KVM_ARM_HOST
17#include <linux/kvm_host.h>
18#endif
16#include <asm/cacheflush.h> 19#include <asm/cacheflush.h>
17#include <asm/glue-df.h> 20#include <asm/glue-df.h>
18#include <asm/glue-pf.h> 21#include <asm/glue-pf.h>
@@ -146,5 +149,27 @@ int main(void)
146 DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); 149 DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
147 DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); 150 DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
148 DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); 151 DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
152#ifdef CONFIG_KVM_ARM_HOST
153 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
154 DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr));
155 DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15));
156 DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest));
157 DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.vfp_host));
158 DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs));
159 DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs));
160 DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs));
161 DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs));
162 DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs));
163 DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs));
164 DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
165 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
166 DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
167 DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
168 DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr));
169 DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar));
170 DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar));
171 DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc));
172 DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
173#endif
149 return 0; 174 return 0;
150} 175}
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 379cf3292390..a1f73b502ef0 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
413 return irq; 413 return irq;
414} 414}
415 415
416static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys) 416static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
417{ 417{
418 int ret; 418 int ret;
419 struct pci_host_bridge_window *window; 419 struct pci_host_bridge_window *window;
@@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
445 return 0; 445 return 0;
446} 446}
447 447
448static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head) 448static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
449{ 449{
450 struct pci_sys_data *sys = NULL; 450 struct pci_sys_data *sys = NULL;
451 int ret; 451 int ret;
@@ -464,6 +464,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
464 sys->map_irq = hw->map_irq; 464 sys->map_irq = hw->map_irq;
465 INIT_LIST_HEAD(&sys->resources); 465 INIT_LIST_HEAD(&sys->resources);
466 466
467 if (hw->private_data)
468 sys->private_data = hw->private_data[nr];
469
467 ret = hw->setup(nr, sys); 470 ret = hw->setup(nr, sys);
468 471
469 if (ret > 0) { 472 if (ret > 0) {
@@ -493,7 +496,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
493 } 496 }
494} 497}
495 498
496void __init pci_common_init(struct hw_pci *hw) 499void pci_common_init(struct hw_pci *hw)
497{ 500{
498 struct pci_sys_data *sys; 501 struct pci_sys_data *sys;
499 LIST_HEAD(head); 502 LIST_HEAD(head);
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 5ff2e77782b1..5eae53e7a2e1 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -28,6 +28,7 @@
28#include <linux/perf_event.h> 28#include <linux/perf_event.h>
29#include <linux/hw_breakpoint.h> 29#include <linux/hw_breakpoint.h>
30#include <linux/smp.h> 30#include <linux/smp.h>
31#include <linux/cpu_pm.h>
31 32
32#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
33#include <asm/cputype.h> 34#include <asm/cputype.h>
@@ -35,6 +36,7 @@
35#include <asm/hw_breakpoint.h> 36#include <asm/hw_breakpoint.h>
36#include <asm/kdebug.h> 37#include <asm/kdebug.h>
37#include <asm/traps.h> 38#include <asm/traps.h>
39#include <asm/hardware/coresight.h>
38 40
39/* Breakpoint currently in use for each BRP. */ 41/* Breakpoint currently in use for each BRP. */
40static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); 42static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
@@ -49,6 +51,9 @@ static int core_num_wrps;
49/* Debug architecture version. */ 51/* Debug architecture version. */
50static u8 debug_arch; 52static u8 debug_arch;
51 53
54/* Does debug architecture support OS Save and Restore? */
55static bool has_ossr;
56
52/* Maximum supported watchpoint length. */ 57/* Maximum supported watchpoint length. */
53static u8 max_watchpoint_len; 58static u8 max_watchpoint_len;
54 59
@@ -903,6 +908,23 @@ static struct undef_hook debug_reg_hook = {
903 .fn = debug_reg_trap, 908 .fn = debug_reg_trap,
904}; 909};
905 910
911/* Does this core support OS Save and Restore? */
912static bool core_has_os_save_restore(void)
913{
914 u32 oslsr;
915
916 switch (get_debug_arch()) {
917 case ARM_DEBUG_ARCH_V7_1:
918 return true;
919 case ARM_DEBUG_ARCH_V7_ECP14:
920 ARM_DBG_READ(c1, c1, 4, oslsr);
921 if (oslsr & ARM_OSLSR_OSLM0)
922 return true;
923 default:
924 return false;
925 }
926}
927
906static void reset_ctrl_regs(void *unused) 928static void reset_ctrl_regs(void *unused)
907{ 929{
908 int i, raw_num_brps, err = 0, cpu = smp_processor_id(); 930 int i, raw_num_brps, err = 0, cpu = smp_processor_id();
@@ -930,11 +952,7 @@ static void reset_ctrl_regs(void *unused)
930 if ((val & 0x1) == 0) 952 if ((val & 0x1) == 0)
931 err = -EPERM; 953 err = -EPERM;
932 954
933 /* 955 if (!has_ossr)
934 * Check whether we implement OS save and restore.
935 */
936 ARM_DBG_READ(c1, c1, 4, val);
937 if ((val & 0x9) == 0)
938 goto clear_vcr; 956 goto clear_vcr;
939 break; 957 break;
940 case ARM_DEBUG_ARCH_V7_1: 958 case ARM_DEBUG_ARCH_V7_1:
@@ -955,9 +973,9 @@ static void reset_ctrl_regs(void *unused)
955 973
956 /* 974 /*
957 * Unconditionally clear the OS lock by writing a value 975 * Unconditionally clear the OS lock by writing a value
958 * other than 0xC5ACCE55 to the access register. 976 * other than CS_LAR_KEY to the access register.
959 */ 977 */
960 ARM_DBG_WRITE(c1, c0, 4, 0); 978 ARM_DBG_WRITE(c1, c0, 4, ~CS_LAR_KEY);
961 isb(); 979 isb();
962 980
963 /* 981 /*
@@ -1015,6 +1033,30 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
1015 .notifier_call = dbg_reset_notify, 1033 .notifier_call = dbg_reset_notify,
1016}; 1034};
1017 1035
1036#ifdef CONFIG_CPU_PM
1037static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
1038 void *v)
1039{
1040 if (action == CPU_PM_EXIT)
1041 reset_ctrl_regs(NULL);
1042
1043 return NOTIFY_OK;
1044}
1045
1046static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
1047 .notifier_call = dbg_cpu_pm_notify,
1048};
1049
1050static void __init pm_init(void)
1051{
1052 cpu_pm_register_notifier(&dbg_cpu_pm_nb);
1053}
1054#else
1055static inline void pm_init(void)
1056{
1057}
1058#endif
1059
1018static int __init arch_hw_breakpoint_init(void) 1060static int __init arch_hw_breakpoint_init(void)
1019{ 1061{
1020 debug_arch = get_debug_arch(); 1062 debug_arch = get_debug_arch();
@@ -1024,6 +1066,8 @@ static int __init arch_hw_breakpoint_init(void)
1024 return 0; 1066 return 0;
1025 } 1067 }
1026 1068
1069 has_ossr = core_has_os_save_restore();
1070
1027 /* Determine how many BRPs/WRPs are available. */ 1071 /* Determine how many BRPs/WRPs are available. */
1028 core_num_brps = get_num_brps(); 1072 core_num_brps = get_num_brps();
1029 core_num_wrps = get_num_wrps(); 1073 core_num_wrps = get_num_wrps();
@@ -1062,8 +1106,9 @@ static int __init arch_hw_breakpoint_init(void)
1062 hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, 1106 hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
1063 TRAP_HWBKPT, "breakpoint debug exception"); 1107 TRAP_HWBKPT, "breakpoint debug exception");
1064 1108
1065 /* Register hotplug notifier. */ 1109 /* Register hotplug and PM notifiers. */
1066 register_cpu_notifier(&dbg_reset_nb); 1110 register_cpu_notifier(&dbg_reset_nb);
1111 pm_init();
1067 return 0; 1112 return 0;
1068} 1113}
1069arch_initcall(arch_hw_breakpoint_init); 1114arch_initcall(arch_hw_breakpoint_init);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f9e8657dd241..31e0eb353cd8 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -149,12 +149,6 @@ again:
149static void 149static void
150armpmu_read(struct perf_event *event) 150armpmu_read(struct perf_event *event)
151{ 151{
152 struct hw_perf_event *hwc = &event->hw;
153
154 /* Don't read disabled counters! */
155 if (hwc->idx < 0)
156 return;
157
158 armpmu_event_update(event); 152 armpmu_event_update(event);
159} 153}
160 154
@@ -207,8 +201,6 @@ armpmu_del(struct perf_event *event, int flags)
207 struct hw_perf_event *hwc = &event->hw; 201 struct hw_perf_event *hwc = &event->hw;
208 int idx = hwc->idx; 202 int idx = hwc->idx;
209 203
210 WARN_ON(idx < 0);
211
212 armpmu_stop(event, PERF_EF_UPDATE); 204 armpmu_stop(event, PERF_EF_UPDATE);
213 hw_events->events[idx] = NULL; 205 hw_events->events[idx] = NULL;
214 clear_bit(idx, hw_events->used_mask); 206 clear_bit(idx, hw_events->used_mask);
@@ -358,7 +350,7 @@ __hw_perf_event_init(struct perf_event *event)
358{ 350{
359 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 351 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
360 struct hw_perf_event *hwc = &event->hw; 352 struct hw_perf_event *hwc = &event->hw;
361 int mapping, err; 353 int mapping;
362 354
363 mapping = armpmu->map_event(event); 355 mapping = armpmu->map_event(event);
364 356
@@ -407,14 +399,12 @@ __hw_perf_event_init(struct perf_event *event)
407 local64_set(&hwc->period_left, hwc->sample_period); 399 local64_set(&hwc->period_left, hwc->sample_period);
408 } 400 }
409 401
410 err = 0;
411 if (event->group_leader != event) { 402 if (event->group_leader != event) {
412 err = validate_group(event); 403 if (validate_group(event) != 0)
413 if (err)
414 return -EINVAL; 404 return -EINVAL;
415 } 405 }
416 406
417 return err; 407 return 0;
418} 408}
419 409
420static int armpmu_event_init(struct perf_event *event) 410static int armpmu_event_init(struct perf_event *event)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 5f6620684e25..1f2740e3dbc0 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -147,7 +147,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
147 cpu_pmu->free_irq = cpu_pmu_free_irq; 147 cpu_pmu->free_irq = cpu_pmu_free_irq;
148 148
149 /* Ensure the PMU has sane values out of reset. */ 149 /* Ensure the PMU has sane values out of reset. */
150 if (cpu_pmu && cpu_pmu->reset) 150 if (cpu_pmu->reset)
151 on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); 151 on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
152} 152}
153 153
@@ -201,48 +201,46 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
201static int probe_current_pmu(struct arm_pmu *pmu) 201static int probe_current_pmu(struct arm_pmu *pmu)
202{ 202{
203 int cpu = get_cpu(); 203 int cpu = get_cpu();
204 unsigned long cpuid = read_cpuid_id(); 204 unsigned long implementor = read_cpuid_implementor();
205 unsigned long implementor = (cpuid & 0xFF000000) >> 24; 205 unsigned long part_number = read_cpuid_part_number();
206 unsigned long part_number = (cpuid & 0xFFF0);
207 int ret = -ENODEV; 206 int ret = -ENODEV;
208 207
209 pr_info("probing PMU on CPU %d\n", cpu); 208 pr_info("probing PMU on CPU %d\n", cpu);
210 209
211 /* ARM Ltd CPUs. */ 210 /* ARM Ltd CPUs. */
212 if (0x41 == implementor) { 211 if (implementor == ARM_CPU_IMP_ARM) {
213 switch (part_number) { 212 switch (part_number) {
214 case 0xB360: /* ARM1136 */ 213 case ARM_CPU_PART_ARM1136:
215 case 0xB560: /* ARM1156 */ 214 case ARM_CPU_PART_ARM1156:
216 case 0xB760: /* ARM1176 */ 215 case ARM_CPU_PART_ARM1176:
217 ret = armv6pmu_init(pmu); 216 ret = armv6pmu_init(pmu);
218 break; 217 break;
219 case 0xB020: /* ARM11mpcore */ 218 case ARM_CPU_PART_ARM11MPCORE:
220 ret = armv6mpcore_pmu_init(pmu); 219 ret = armv6mpcore_pmu_init(pmu);
221 break; 220 break;
222 case 0xC080: /* Cortex-A8 */ 221 case ARM_CPU_PART_CORTEX_A8:
223 ret = armv7_a8_pmu_init(pmu); 222 ret = armv7_a8_pmu_init(pmu);
224 break; 223 break;
225 case 0xC090: /* Cortex-A9 */ 224 case ARM_CPU_PART_CORTEX_A9:
226 ret = armv7_a9_pmu_init(pmu); 225 ret = armv7_a9_pmu_init(pmu);
227 break; 226 break;
228 case 0xC050: /* Cortex-A5 */ 227 case ARM_CPU_PART_CORTEX_A5:
229 ret = armv7_a5_pmu_init(pmu); 228 ret = armv7_a5_pmu_init(pmu);
230 break; 229 break;
231 case 0xC0F0: /* Cortex-A15 */ 230 case ARM_CPU_PART_CORTEX_A15:
232 ret = armv7_a15_pmu_init(pmu); 231 ret = armv7_a15_pmu_init(pmu);
233 break; 232 break;
234 case 0xC070: /* Cortex-A7 */ 233 case ARM_CPU_PART_CORTEX_A7:
235 ret = armv7_a7_pmu_init(pmu); 234 ret = armv7_a7_pmu_init(pmu);
236 break; 235 break;
237 } 236 }
238 /* Intel CPUs [xscale]. */ 237 /* Intel CPUs [xscale]. */
239 } else if (0x69 == implementor) { 238 } else if (implementor == ARM_CPU_IMP_INTEL) {
240 part_number = (cpuid >> 13) & 0x7; 239 switch (xscale_cpu_arch_version()) {
241 switch (part_number) { 240 case ARM_CPU_XSCALE_ARCH_V1:
242 case 1:
243 ret = xscale1pmu_init(pmu); 241 ret = xscale1pmu_init(pmu);
244 break; 242 break;
245 case 2: 243 case ARM_CPU_XSCALE_ARCH_V2:
246 ret = xscale2pmu_init(pmu); 244 ret = xscale2pmu_init(pmu);
247 break; 245 break;
248 } 246 }
@@ -279,17 +277,22 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
279 } 277 }
280 278
281 if (ret) { 279 if (ret) {
282 pr_info("failed to register PMU devices!"); 280 pr_info("failed to probe PMU!");
283 kfree(pmu); 281 goto out_free;
284 return ret;
285 } 282 }
286 283
287 cpu_pmu = pmu; 284 cpu_pmu = pmu;
288 cpu_pmu->plat_device = pdev; 285 cpu_pmu->plat_device = pdev;
289 cpu_pmu_init(cpu_pmu); 286 cpu_pmu_init(cpu_pmu);
290 armpmu_register(cpu_pmu, PERF_TYPE_RAW); 287 ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
291 288
292 return 0; 289 if (!ret)
290 return 0;
291
292out_free:
293 pr_info("failed to register PMU devices!");
294 kfree(pmu);
295 return ret;
293} 296}
294 297
295static struct platform_driver cpu_pmu_driver = { 298static struct platform_driver cpu_pmu_driver = {
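
The open-coded MIDR field extraction is replaced by accessors from
asm/cputype.h. A rough sketch of what those helpers are assumed to reduce to,
using the same masks the old code applied to read_cpuid_id():

	#define ARM_CPU_IMP_ARM		0x41
	#define ARM_CPU_IMP_INTEL	0x69

	/* Sketch only; the real definitions live in asm/cputype.h. */
	static inline unsigned int sketch_cpuid_implementor(unsigned int midr)
	{
		return (midr & 0xff000000) >> 24;	/* MIDR[31:24] */
	}

	static inline unsigned int sketch_cpuid_part_number(unsigned int midr)
	{
		return midr & 0xfff0;			/* MIDR[15:4], kept in place */
	}
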
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 041d0526a288..03664b0e8fa4 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -106,7 +106,7 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
106 }, 106 },
107 [C(OP_WRITE)] = { 107 [C(OP_WRITE)] = {
108 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 108 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
109 [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, 109 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
110 }, 110 },
111 [C(OP_PREFETCH)] = { 111 [C(OP_PREFETCH)] = {
112 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 112 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -259,7 +259,7 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
259 }, 259 },
260 [C(OP_WRITE)] = { 260 [C(OP_WRITE)] = {
261 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 261 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
262 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, 262 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
263 }, 263 },
264 [C(OP_PREFETCH)] = { 264 [C(OP_PREFETCH)] = {
265 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 265 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4fbc757d9cff..8c79a9e70b83 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -157,8 +157,8 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
157 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 157 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
158 }, 158 },
159 [C(OP_WRITE)] = { 159 [C(OP_WRITE)] = {
160 [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, 160 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
161 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 161 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
162 }, 162 },
163 [C(OP_PREFETCH)] = { 163 [C(OP_PREFETCH)] = {
164 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 164 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -282,7 +282,7 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
282 }, 282 },
283 [C(OP_WRITE)] = { 283 [C(OP_WRITE)] = {
284 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 284 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
285 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 285 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
286 }, 286 },
287 [C(OP_PREFETCH)] = { 287 [C(OP_PREFETCH)] = {
288 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 288 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -399,8 +399,8 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
399 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 399 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
400 }, 400 },
401 [C(OP_WRITE)] = { 401 [C(OP_WRITE)] = {
402 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 402 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
403 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 403 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
404 }, 404 },
405 /* 405 /*
406 * The prefetch counters don't differentiate between the I 406 * The prefetch counters don't differentiate between the I
@@ -527,8 +527,8 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
527 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 527 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
528 }, 528 },
529 [C(OP_WRITE)] = { 529 [C(OP_WRITE)] = {
530 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 530 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
531 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 531 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
532 }, 532 },
533 [C(OP_PREFETCH)] = { 533 [C(OP_PREFETCH)] = {
534 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 534 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -651,8 +651,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
651 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 651 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
652 }, 652 },
653 [C(OP_WRITE)] = { 653 [C(OP_WRITE)] = {
654 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 654 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
655 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 655 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
656 }, 656 },
657 [C(OP_PREFETCH)] = { 657 [C(OP_PREFETCH)] = {
658 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 658 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 2b0fe30ec12e..63990c42fac9 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -83,7 +83,7 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
83 }, 83 },
84 [C(OP_WRITE)] = { 84 [C(OP_WRITE)] = {
85 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 85 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
86 [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, 86 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
87 }, 87 },
88 [C(OP_PREFETCH)] = { 88 [C(OP_PREFETCH)] = {
89 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 89 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c6dec5fc20aa..047d3e40e470 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -172,14 +172,9 @@ static void default_idle(void)
172 local_irq_enable(); 172 local_irq_enable();
173} 173}
174 174
175void (*pm_idle)(void) = default_idle;
176EXPORT_SYMBOL(pm_idle);
177
178/* 175/*
179 * The idle thread, has rather strange semantics for calling pm_idle, 176 * The idle thread.
180 * but this is what x86 does and we need to do the same, so that 177 * We always respect 'hlt_counter' to prevent low power idle.
181 * things like cpuidle get called in the same way. The only difference
182 * is that we always respect 'hlt_counter' to prevent low power idle.
183 */ 178 */
184void cpu_idle(void) 179void cpu_idle(void)
185{ 180{
@@ -210,10 +205,10 @@ void cpu_idle(void)
210 } else if (!need_resched()) { 205 } else if (!need_resched()) {
211 stop_critical_timings(); 206 stop_critical_timings();
212 if (cpuidle_idle_call()) 207 if (cpuidle_idle_call())
213 pm_idle(); 208 default_idle();
214 start_critical_timings(); 209 start_critical_timings();
215 /* 210 /*
216 * pm_idle functions must always 211 * default_idle must always
217 * return with IRQs enabled. 212 * return with IRQs enabled.
218 */ 213 */
219 WARN_ON(irqs_disabled()); 214 WARN_ON(irqs_disabled());
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
new file mode 100644
index 000000000000..36531643cc2c
--- /dev/null
+++ b/arch/arm/kernel/psci.c
@@ -0,0 +1,211 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2012 ARM Limited
12 *
13 * Author: Will Deacon <will.deacon@arm.com>
14 */
15
16#define pr_fmt(fmt) "psci: " fmt
17
18#include <linux/init.h>
19#include <linux/of.h>
20
21#include <asm/compiler.h>
22#include <asm/errno.h>
23#include <asm/opcodes-sec.h>
24#include <asm/opcodes-virt.h>
25#include <asm/psci.h>
26
27struct psci_operations psci_ops;
28
29static int (*invoke_psci_fn)(u32, u32, u32, u32);
30
31enum psci_function {
32 PSCI_FN_CPU_SUSPEND,
33 PSCI_FN_CPU_ON,
34 PSCI_FN_CPU_OFF,
35 PSCI_FN_MIGRATE,
36 PSCI_FN_MAX,
37};
38
39static u32 psci_function_id[PSCI_FN_MAX];
40
41#define PSCI_RET_SUCCESS 0
42#define PSCI_RET_EOPNOTSUPP -1
43#define PSCI_RET_EINVAL -2
44#define PSCI_RET_EPERM -3
45
46static int psci_to_linux_errno(int errno)
47{
48 switch (errno) {
49 case PSCI_RET_SUCCESS:
50 return 0;
51 case PSCI_RET_EOPNOTSUPP:
52 return -EOPNOTSUPP;
53 case PSCI_RET_EINVAL:
54 return -EINVAL;
55 case PSCI_RET_EPERM:
56 return -EPERM;
57 };
58
59 return -EINVAL;
60}
61
62#define PSCI_POWER_STATE_ID_MASK 0xffff
63#define PSCI_POWER_STATE_ID_SHIFT 0
64#define PSCI_POWER_STATE_TYPE_MASK 0x1
65#define PSCI_POWER_STATE_TYPE_SHIFT 16
66#define PSCI_POWER_STATE_AFFL_MASK 0x3
67#define PSCI_POWER_STATE_AFFL_SHIFT 24
68
69static u32 psci_power_state_pack(struct psci_power_state state)
70{
71 return ((state.id & PSCI_POWER_STATE_ID_MASK)
72 << PSCI_POWER_STATE_ID_SHIFT) |
73 ((state.type & PSCI_POWER_STATE_TYPE_MASK)
74 << PSCI_POWER_STATE_TYPE_SHIFT) |
75 ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK)
76 << PSCI_POWER_STATE_AFFL_SHIFT);
77}
78
79/*
80 * The following two functions are invoked via the invoke_psci_fn pointer
81 * and will not be inlined, allowing us to piggyback on the AAPCS.
82 */
83static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1,
84 u32 arg2)
85{
86 asm volatile(
87 __asmeq("%0", "r0")
88 __asmeq("%1", "r1")
89 __asmeq("%2", "r2")
90 __asmeq("%3", "r3")
91 __HVC(0)
92 : "+r" (function_id)
93 : "r" (arg0), "r" (arg1), "r" (arg2));
94
95 return function_id;
96}
97
98static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
99 u32 arg2)
100{
101 asm volatile(
102 __asmeq("%0", "r0")
103 __asmeq("%1", "r1")
104 __asmeq("%2", "r2")
105 __asmeq("%3", "r3")
106 __SMC(0)
107 : "+r" (function_id)
108 : "r" (arg0), "r" (arg1), "r" (arg2));
109
110 return function_id;
111}
112
113static int psci_cpu_suspend(struct psci_power_state state,
114 unsigned long entry_point)
115{
116 int err;
117 u32 fn, power_state;
118
119 fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
120 power_state = psci_power_state_pack(state);
121 err = invoke_psci_fn(fn, power_state, entry_point, 0);
122 return psci_to_linux_errno(err);
123}
124
125static int psci_cpu_off(struct psci_power_state state)
126{
127 int err;
128 u32 fn, power_state;
129
130 fn = psci_function_id[PSCI_FN_CPU_OFF];
131 power_state = psci_power_state_pack(state);
132 err = invoke_psci_fn(fn, power_state, 0, 0);
133 return psci_to_linux_errno(err);
134}
135
136static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
137{
138 int err;
139 u32 fn;
140
141 fn = psci_function_id[PSCI_FN_CPU_ON];
142 err = invoke_psci_fn(fn, cpuid, entry_point, 0);
143 return psci_to_linux_errno(err);
144}
145
146static int psci_migrate(unsigned long cpuid)
147{
148 int err;
149 u32 fn;
150
151 fn = psci_function_id[PSCI_FN_MIGRATE];
152 err = invoke_psci_fn(fn, cpuid, 0, 0);
153 return psci_to_linux_errno(err);
154}
155
156static const struct of_device_id psci_of_match[] __initconst = {
157 { .compatible = "arm,psci", },
158 {},
159};
160
161static int __init psci_init(void)
162{
163 struct device_node *np;
164 const char *method;
165 u32 id;
166
167 np = of_find_matching_node(NULL, psci_of_match);
168 if (!np)
169 return 0;
170
171 pr_info("probing function IDs from device-tree\n");
172
173 if (of_property_read_string(np, "method", &method)) {
174 pr_warning("missing \"method\" property\n");
175 goto out_put_node;
176 }
177
178 if (!strcmp("hvc", method)) {
179 invoke_psci_fn = __invoke_psci_fn_hvc;
180 } else if (!strcmp("smc", method)) {
181 invoke_psci_fn = __invoke_psci_fn_smc;
182 } else {
183 pr_warning("invalid \"method\" property: %s\n", method);
184 goto out_put_node;
185 }
186
187 if (!of_property_read_u32(np, "cpu_suspend", &id)) {
188 psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
189 psci_ops.cpu_suspend = psci_cpu_suspend;
190 }
191
192 if (!of_property_read_u32(np, "cpu_off", &id)) {
193 psci_function_id[PSCI_FN_CPU_OFF] = id;
194 psci_ops.cpu_off = psci_cpu_off;
195 }
196
197 if (!of_property_read_u32(np, "cpu_on", &id)) {
198 psci_function_id[PSCI_FN_CPU_ON] = id;
199 psci_ops.cpu_on = psci_cpu_on;
200 }
201
202 if (!of_property_read_u32(np, "migrate", &id)) {
203 psci_function_id[PSCI_FN_MIGRATE] = id;
204 psci_ops.migrate = psci_migrate;
205 }
206
207out_put_node:
208 of_node_put(np);
209 return 0;
210}
211early_initcall(psci_init);
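
psci_init() discovers everything it needs from a device-tree node compatible
with "arm,psci": a "method" string selecting the hvc or smc conduit, plus one
u32 function ID per operation (cpu_suspend, cpu_off, cpu_on, migrate).
Consumers then call through psci_ops. A sketch of a hypothetical
secondary-boot hook (psci_boot_secondary and the use of secondary_startup here
are illustrative, not part of this patch):

	#include <linux/errno.h>
	#include <linux/smp.h>
	#include <asm/memory.h>
	#include <asm/psci.h>
	#include <asm/smp_plat.h>

	static int __cpuinit psci_boot_secondary(unsigned int cpu,
						 struct task_struct *idle)
	{
		extern void secondary_startup(void);

		/* Ask the firmware to power the core on at our entry point. */
		if (psci_ops.cpu_on)
			return psci_ops.cpu_on(cpu_logical_map(cpu),
					       virt_to_phys(secondary_startup));
		return -ENODEV;
	}
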
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index fc6692e2b603..bd6f56b9ec21 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -93,11 +93,11 @@ static void notrace update_sched_clock(void)
93 * detectable in cyc_to_fixed_sched_clock(). 93 * detectable in cyc_to_fixed_sched_clock().
94 */ 94 */
95 raw_local_irq_save(flags); 95 raw_local_irq_save(flags);
96 cd.epoch_cyc = cyc; 96 cd.epoch_cyc_copy = cyc;
97 smp_wmb(); 97 smp_wmb();
98 cd.epoch_ns = ns; 98 cd.epoch_ns = ns;
99 smp_wmb(); 99 smp_wmb();
100 cd.epoch_cyc_copy = cyc; 100 cd.epoch_cyc = cyc;
101 raw_local_irq_restore(flags); 101 raw_local_irq_restore(flags);
102} 102}
103 103
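
This hunk restores the ordering the lockless reader depends on: the writer
must publish epoch_cyc_copy first and epoch_cyc last, so a reader racing with
an update sees the two fields disagree and retries. For reference, the read
side in this file pairs with it roughly like this:

	u64 epoch_ns;
	u32 epoch_cyc;

	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);
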
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 84f4cbf652e5..365c8d92e2eb 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -125,18 +125,6 @@ void __init smp_init_cpus(void)
125 smp_ops.smp_init_cpus(); 125 smp_ops.smp_init_cpus();
126} 126}
127 127
128static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
129{
130 if (smp_ops.smp_prepare_cpus)
131 smp_ops.smp_prepare_cpus(max_cpus);
132}
133
134static void __cpuinit platform_secondary_init(unsigned int cpu)
135{
136 if (smp_ops.smp_secondary_init)
137 smp_ops.smp_secondary_init(cpu);
138}
139
140int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) 128int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
141{ 129{
142 if (smp_ops.smp_boot_secondary) 130 if (smp_ops.smp_boot_secondary)
@@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu)
154 return 1; 142 return 1;
155} 143}
156 144
157static void platform_cpu_die(unsigned int cpu)
158{
159 if (smp_ops.cpu_die)
160 smp_ops.cpu_die(cpu);
161}
162
163static int platform_cpu_disable(unsigned int cpu) 145static int platform_cpu_disable(unsigned int cpu)
164{ 146{
165 if (smp_ops.cpu_disable) 147 if (smp_ops.cpu_disable)
@@ -257,7 +239,8 @@ void __ref cpu_die(void)
257 * actual CPU shutdown procedure is at least platform (if not 239 * actual CPU shutdown procedure is at least platform (if not
258 * CPU) specific. 240 * CPU) specific.
259 */ 241 */
260 platform_cpu_die(cpu); 242 if (smp_ops.cpu_die)
243 smp_ops.cpu_die(cpu);
261 244
262 /* 245 /*
263 * Do not return to the idle loop - jump back to the secondary 246 * Do not return to the idle loop - jump back to the secondary
@@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
324 /* 307 /*
325 * Give the platform a chance to do its own initialisation. 308 * Give the platform a chance to do its own initialisation.
326 */ 309 */
327 platform_secondary_init(cpu); 310 if (smp_ops.smp_secondary_init)
311 smp_ops.smp_secondary_init(cpu);
328 312
329 notify_cpu_starting(cpu); 313 notify_cpu_starting(cpu);
330 314
@@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
399 /* 383 /*
400 * Initialise the present map, which describes the set of CPUs 384 * Initialise the present map, which describes the set of CPUs
401 * actually populated at the present time. A platform should 385 * actually populated at the present time. A platform should
402 * re-initialize the map in platform_smp_prepare_cpus() if 386 * re-initialize the map in the platform's smp_prepare_cpus()
403 * present != possible (e.g. physical hotplug). 387 * if present != possible (e.g. physical hotplug).
404 */ 388 */
405 init_cpu_present(cpu_possible_mask); 389 init_cpu_present(cpu_possible_mask);
406 390
@@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
408 * Initialise the SCU if there are more than one CPU 392 * Initialise the SCU if there are more than one CPU
409 * and let them know where to start. 393 * and let them know where to start.
410 */ 394 */
411 platform_smp_prepare_cpus(max_cpus); 395 if (smp_ops.smp_prepare_cpus)
396 smp_ops.smp_prepare_cpus(max_cpus);
412 } 397 }
413} 398}
414 399
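
With the trivial platform_* wrappers gone, the optional smp_operations hooks
are NULL-checked directly at each call site, so a platform fills in only the
callbacks it actually implements. A sketch of the resulting pattern (the my_*
names are hypothetical):

	static struct smp_operations my_smp_ops __initdata = {
		.smp_init_cpus		= my_init_cpus,
		.smp_boot_secondary	= my_boot_secondary,
		/* .smp_prepare_cpus, .smp_secondary_init and .cpu_die are
		 * deliberately left NULL; generic code now skips them. */
	};
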
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index b9f015e843d8..45eac87ed66a 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base)
75int scu_power_mode(void __iomem *scu_base, unsigned int mode) 75int scu_power_mode(void __iomem *scu_base, unsigned int mode)
76{ 76{
77 unsigned int val; 77 unsigned int val;
78 int cpu = cpu_logical_map(smp_processor_id()); 78 int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
79 79
80 if (mode > 3 || mode == 1 || cpu > 3) 80 if (mode > 3 || mode == 1 || cpu > 3)
81 return -EINVAL; 81 return -EINVAL;
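
scu_power_mode() now takes the cluster-local CPU number from affinity level 0
of the MPIDR-derived logical map entry instead of using the entry wholesale.
The accessor is assumed to reduce to a byte extraction, one affinity level per
byte, along these lines:

	/* Sketch of the asm/cputype.h accessor used above. */
	#define MPIDR_LEVEL_BITS	8
	#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
	#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
		(((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)
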
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 49f335d301ba..ae0c7bb39ae8 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -31,7 +31,6 @@ static void __iomem *twd_base;
31 31
32static struct clk *twd_clk; 32static struct clk *twd_clk;
33static unsigned long twd_timer_rate; 33static unsigned long twd_timer_rate;
34static bool common_setup_called;
35static DEFINE_PER_CPU(bool, percpu_setup_called); 34static DEFINE_PER_CPU(bool, percpu_setup_called);
36 35
37static struct clock_event_device __percpu **twd_evt; 36static struct clock_event_device __percpu **twd_evt;
@@ -239,25 +238,28 @@ static irqreturn_t twd_handler(int irq, void *dev_id)
239 return IRQ_NONE; 238 return IRQ_NONE;
240} 239}
241 240
242static struct clk *twd_get_clock(void) 241static void twd_get_clock(struct device_node *np)
243{ 242{
244 struct clk *clk;
245 int err; 243 int err;
246 244
247 clk = clk_get_sys("smp_twd", NULL); 245 if (np)
248 if (IS_ERR(clk)) { 246 twd_clk = of_clk_get(np, 0);
249 pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk)); 247 else
250 return clk; 248 twd_clk = clk_get_sys("smp_twd", NULL);
249
250 if (IS_ERR(twd_clk)) {
251 pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk));
252 return;
251 } 253 }
252 254
253 err = clk_prepare_enable(clk); 255 err = clk_prepare_enable(twd_clk);
254 if (err) { 256 if (err) {
255 pr_err("smp_twd: clock failed to prepare+enable: %d\n", err); 257 pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
256 clk_put(clk); 258 clk_put(twd_clk);
257 return ERR_PTR(err); 259 return;
258 } 260 }
259 261
260 return clk; 262 twd_timer_rate = clk_get_rate(twd_clk);
261} 263}
262 264
263/* 265/*
@@ -280,26 +282,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
280 } 282 }
281 per_cpu(percpu_setup_called, cpu) = true; 283 per_cpu(percpu_setup_called, cpu) = true;
282 284
283 /* 285 twd_calibrate_rate();
284 * This stuff only need to be done once for the entire TWD cluster
285 * during the runtime of the system.
286 */
287 if (!common_setup_called) {
288 twd_clk = twd_get_clock();
289
290 /*
291 * We use IS_ERR_OR_NULL() here, because if the clock stubs
292 * are active we will get a valid clk reference which is
293 * however NULL and will return the rate 0. In that case we
294 * need to calibrate the rate instead.
295 */
296 if (!IS_ERR_OR_NULL(twd_clk))
297 twd_timer_rate = clk_get_rate(twd_clk);
298 else
299 twd_calibrate_rate();
300
301 common_setup_called = true;
302 }
303 286
304 /* 287 /*
305 * The following is done once per CPU the first time .setup() is 288 * The following is done once per CPU the first time .setup() is
@@ -330,7 +313,7 @@ static struct local_timer_ops twd_lt_ops __cpuinitdata = {
330 .stop = twd_timer_stop, 313 .stop = twd_timer_stop,
331}; 314};
332 315
333static int __init twd_local_timer_common_register(void) 316static int __init twd_local_timer_common_register(struct device_node *np)
334{ 317{
335 int err; 318 int err;
336 319
@@ -350,6 +333,8 @@ static int __init twd_local_timer_common_register(void)
350 if (err) 333 if (err)
351 goto out_irq; 334 goto out_irq;
352 335
336 twd_get_clock(np);
337
353 return 0; 338 return 0;
354 339
355out_irq: 340out_irq:
@@ -373,7 +358,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt)
373 if (!twd_base) 358 if (!twd_base)
374 return -ENOMEM; 359 return -ENOMEM;
375 360
376 return twd_local_timer_common_register(); 361 return twd_local_timer_common_register(NULL);
377} 362}
378 363
379#ifdef CONFIG_OF 364#ifdef CONFIG_OF
@@ -405,7 +390,7 @@ void __init twd_local_timer_of_register(void)
405 goto out; 390 goto out;
406 } 391 }
407 392
408 err = twd_local_timer_common_register(); 393 err = twd_local_timer_common_register(np);
409 394
410out: 395out:
411 WARN(err, "twd_local_timer_of_register failed (%d)\n", err); 396 WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 11c1785bf63e..b571484e9f03 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -19,7 +19,11 @@
19 ALIGN_FUNCTION(); \ 19 ALIGN_FUNCTION(); \
20 VMLINUX_SYMBOL(__idmap_text_start) = .; \ 20 VMLINUX_SYMBOL(__idmap_text_start) = .; \
21 *(.idmap.text) \ 21 *(.idmap.text) \
22 VMLINUX_SYMBOL(__idmap_text_end) = .; 22 VMLINUX_SYMBOL(__idmap_text_end) = .; \
23 ALIGN_FUNCTION(); \
24 VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
25 *(.hyp.idmap.text) \
26 VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
23 27
24#ifdef CONFIG_HOTPLUG_CPU 28#ifdef CONFIG_HOTPLUG_CPU
25#define ARM_CPU_DISCARD(x) 29#define ARM_CPU_DISCARD(x)
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
new file mode 100644
index 000000000000..05227cb57a7b
--- /dev/null
+++ b/arch/arm/kvm/Kconfig
@@ -0,0 +1,56 @@
1#
2# KVM configuration
3#
4
5source "virt/kvm/Kconfig"
6
7menuconfig VIRTUALIZATION
8 bool "Virtualization"
9 ---help---
10 Say Y here to get to see options for using your Linux host to run
11 other operating systems inside virtual machines (guests).
12 This option alone does not add any kernel code.
13
14 If you say N, all options in this submenu will be skipped and
15 disabled.
16
17if VIRTUALIZATION
18
19config KVM
20 bool "Kernel-based Virtual Machine (KVM) support"
21 select PREEMPT_NOTIFIERS
22 select ANON_INODES
23 select KVM_MMIO
24 select KVM_ARM_HOST
25 depends on ARM_VIRT_EXT && ARM_LPAE
26 ---help---
27 Support hosting virtualized guest machines. You will also
28 need to select one or more of the processor modules below.
29
30 This module provides access to the hardware capabilities through
31 a character device node named /dev/kvm.
32
33 If unsure, say N.
34
35config KVM_ARM_HOST
36 bool "KVM host support for ARM CPUs"
37 depends on KVM
38 depends on MMU
39 select MMU_NOTIFIER
40 ---help---
41 Provides host support for ARM processors.
42
43config KVM_ARM_MAX_VCPUS
44 int "Maximum number of supported virtual CPUs per VM"
45 depends on KVM_ARM_HOST
46 default 4
47 help
48 Static number of max supported virtual CPUs per VM.
49
50 If you choose a high number, the vcpu structures will be quite
51 large, so only choose a reasonable number that you expect to
52 actually use.
53
54source drivers/virtio/Kconfig
55
56endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
new file mode 100644
index 000000000000..ea27987bd07f
--- /dev/null
+++ b/arch/arm/kvm/Makefile
@@ -0,0 +1,21 @@
1#
2# Makefile for Kernel-based Virtual Machine module
3#
4
5plus_virt := $(call as-instr,.arch_extension virt,+virt)
6ifeq ($(plus_virt),+virt)
7 plus_virt_def := -DREQUIRES_VIRT=1
8endif
9
10ccflags-y += -Ivirt/kvm -Iarch/arm/kvm
11CFLAGS_arm.o := -I. $(plus_virt_def)
12CFLAGS_mmu.o := -I.
13
14AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
15AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
16
17kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
18
19obj-y += kvm-arm.o init.o interrupts.o
20obj-y += arm.o guest.o mmu.o emulate.o reset.o
21obj-y += coproc.o coproc_a15.o mmio.o psci.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
new file mode 100644
index 000000000000..2d30e3afdaf9
--- /dev/null
+++ b/arch/arm/kvm/arm.c
@@ -0,0 +1,1015 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/errno.h>
20#include <linux/err.h>
21#include <linux/kvm_host.h>
22#include <linux/module.h>
23#include <linux/vmalloc.h>
24#include <linux/fs.h>
25#include <linux/mman.h>
26#include <linux/sched.h>
27#include <linux/kvm.h>
28#include <trace/events/kvm.h>
29
30#define CREATE_TRACE_POINTS
31#include "trace.h"
32
33#include <asm/unified.h>
34#include <asm/uaccess.h>
35#include <asm/ptrace.h>
36#include <asm/mman.h>
37#include <asm/cputype.h>
38#include <asm/tlbflush.h>
39#include <asm/cacheflush.h>
40#include <asm/virt.h>
41#include <asm/kvm_arm.h>
42#include <asm/kvm_asm.h>
43#include <asm/kvm_mmu.h>
44#include <asm/kvm_emulate.h>
45#include <asm/kvm_coproc.h>
46#include <asm/kvm_psci.h>
47#include <asm/opcodes.h>
48
49#ifdef REQUIRES_VIRT
50__asm__(".arch_extension virt");
51#endif
52
53static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
54static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
55static unsigned long hyp_default_vectors;
56
57/* The VMID used in the VTTBR */
58static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
59static u8 kvm_next_vmid;
60static DEFINE_SPINLOCK(kvm_vmid_lock);
61
62int kvm_arch_hardware_enable(void *garbage)
63{
64 return 0;
65}
66
67int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
68{
69 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
70}
71
72void kvm_arch_hardware_disable(void *garbage)
73{
74}
75
76int kvm_arch_hardware_setup(void)
77{
78 return 0;
79}
80
81void kvm_arch_hardware_unsetup(void)
82{
83}
84
85void kvm_arch_check_processor_compat(void *rtn)
86{
87 *(int *)rtn = 0;
88}
89
90void kvm_arch_sync_events(struct kvm *kvm)
91{
92}
93
94/**
95 * kvm_arch_init_vm - initializes a VM data structure
96 * @kvm: pointer to the KVM struct
97 */
98int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
99{
100 int ret = 0;
101
102 if (type)
103 return -EINVAL;
104
105 ret = kvm_alloc_stage2_pgd(kvm);
106 if (ret)
107 goto out_fail_alloc;
108
109 ret = create_hyp_mappings(kvm, kvm + 1);
110 if (ret)
111 goto out_free_stage2_pgd;
112
113 /* Mark the initial VMID generation invalid */
114 kvm->arch.vmid_gen = 0;
115
116 return ret;
117out_free_stage2_pgd:
118 kvm_free_stage2_pgd(kvm);
119out_fail_alloc:
120 return ret;
121}
122
123int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
124{
125 return VM_FAULT_SIGBUS;
126}
127
128void kvm_arch_free_memslot(struct kvm_memory_slot *free,
129 struct kvm_memory_slot *dont)
130{
131}
132
133int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
134{
135 return 0;
136}
137
138/**
139 * kvm_arch_destroy_vm - destroy the VM data structure
140 * @kvm: pointer to the KVM struct
141 */
142void kvm_arch_destroy_vm(struct kvm *kvm)
143{
144 int i;
145
146 kvm_free_stage2_pgd(kvm);
147
148 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
149 if (kvm->vcpus[i]) {
150 kvm_arch_vcpu_free(kvm->vcpus[i]);
151 kvm->vcpus[i] = NULL;
152 }
153 }
154}
155
156int kvm_dev_ioctl_check_extension(long ext)
157{
158 int r;
159 switch (ext) {
160 case KVM_CAP_USER_MEMORY:
161 case KVM_CAP_SYNC_MMU:
162 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
163 case KVM_CAP_ONE_REG:
164 case KVM_CAP_ARM_PSCI:
165 r = 1;
166 break;
167 case KVM_CAP_COALESCED_MMIO:
168 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
169 break;
170 case KVM_CAP_NR_VCPUS:
171 r = num_online_cpus();
172 break;
173 case KVM_CAP_MAX_VCPUS:
174 r = KVM_MAX_VCPUS;
175 break;
176 default:
177 r = 0;
178 break;
179 }
180 return r;
181}
182
183long kvm_arch_dev_ioctl(struct file *filp,
184 unsigned int ioctl, unsigned long arg)
185{
186 return -EINVAL;
187}
188
189int kvm_arch_set_memory_region(struct kvm *kvm,
190 struct kvm_userspace_memory_region *mem,
191 struct kvm_memory_slot old,
192 int user_alloc)
193{
194 return 0;
195}
196
197int kvm_arch_prepare_memory_region(struct kvm *kvm,
198 struct kvm_memory_slot *memslot,
199 struct kvm_memory_slot old,
200 struct kvm_userspace_memory_region *mem,
201 int user_alloc)
202{
203 return 0;
204}
205
206void kvm_arch_commit_memory_region(struct kvm *kvm,
207 struct kvm_userspace_memory_region *mem,
208 struct kvm_memory_slot old,
209 int user_alloc)
210{
211}
212
213void kvm_arch_flush_shadow_all(struct kvm *kvm)
214{
215}
216
217void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
218 struct kvm_memory_slot *slot)
219{
220}
221
222struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
223{
224 int err;
225 struct kvm_vcpu *vcpu;
226
227 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
228 if (!vcpu) {
229 err = -ENOMEM;
230 goto out;
231 }
232
233 err = kvm_vcpu_init(vcpu, kvm, id);
234 if (err)
235 goto free_vcpu;
236
237 err = create_hyp_mappings(vcpu, vcpu + 1);
238 if (err)
239 goto vcpu_uninit;
240
241 return vcpu;
242vcpu_uninit:
243 kvm_vcpu_uninit(vcpu);
244free_vcpu:
245 kmem_cache_free(kvm_vcpu_cache, vcpu);
246out:
247 return ERR_PTR(err);
248}
249
250int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
251{
252 return 0;
253}
254
255void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
256{
257 kvm_mmu_free_memory_caches(vcpu);
258 kmem_cache_free(kvm_vcpu_cache, vcpu);
259}
260
261void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
262{
263 kvm_arch_vcpu_free(vcpu);
264}
265
266int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
267{
268 return 0;
269}
270
271int __attribute_const__ kvm_target_cpu(void)
272{
273 unsigned long implementor = read_cpuid_implementor();
274 unsigned long part_number = read_cpuid_part_number();
275
276 if (implementor != ARM_CPU_IMP_ARM)
277 return -EINVAL;
278
279 switch (part_number) {
280 case ARM_CPU_PART_CORTEX_A15:
281 return KVM_ARM_TARGET_CORTEX_A15;
282 default:
283 return -EINVAL;
284 }
285}
286
287int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
288{
289 /* Force users to call KVM_ARM_VCPU_INIT */
290 vcpu->arch.target = -1;
291 return 0;
292}
293
294void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
295{
296}
297
298void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
299{
300 vcpu->cpu = cpu;
301 vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state);
302
303 /*
304 * Check whether this vcpu requires the cache to be flushed on
305 * this physical CPU. This is a consequence of doing dcache
306 * operations by set/way on this vcpu. We do it here to be in
307 * a non-preemptible section.
308 */
309 if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
310 flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
311}
312
313void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
314{
315}
316
317int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
318 struct kvm_guest_debug *dbg)
319{
320 return -EINVAL;
321}
322
323
324int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
325 struct kvm_mp_state *mp_state)
326{
327 return -EINVAL;
328}
329
330int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
331 struct kvm_mp_state *mp_state)
332{
333 return -EINVAL;
334}
335
336/**
337 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
338 * @v: The VCPU pointer
339 *
340 * If the guest CPU is not waiting for interrupts or an interrupt line is
341 * asserted, the CPU is by definition runnable.
342 */
343int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
344{
345 return !!v->arch.irq_lines;
346}
347
348/* Just ensure a guest exit from a particular CPU */
349static void exit_vm_noop(void *info)
350{
351}
352
353void force_vm_exit(const cpumask_t *mask)
354{
355 smp_call_function_many(mask, exit_vm_noop, NULL, true);
356}
357
358/**
359 * need_new_vmid_gen - check that the VMID is still valid
360 * @kvm: The VM's VMID to check
361 *
362 * return true if there is a new generation of VMIDs being used
363 *
364 * The hardware supports only 256 values with the value zero reserved for the
365 * host, so we check if an assigned value belongs to a previous generation,
366 * which requires us to assign a new value. If we're the first to use a
367 * VMID for the new generation, we must flush necessary caches and TLBs on all
368 * CPUs.
369 */
370static bool need_new_vmid_gen(struct kvm *kvm)
371{
372 return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
373}
374
375/**
376 * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
377 * @kvm: The guest that we are about to run
378 *
379 * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
380 * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
381 * caches and TLBs.
382 */
383static void update_vttbr(struct kvm *kvm)
384{
385 phys_addr_t pgd_phys;
386 u64 vmid;
387
388 if (!need_new_vmid_gen(kvm))
389 return;
390
391 spin_lock(&kvm_vmid_lock);
392
393 /*
394 * We need to re-check the vmid_gen here to ensure that if another vcpu
395 * already allocated a valid vmid for this vm, then this vcpu should
396 * use the same vmid.
397 */
398 if (!need_new_vmid_gen(kvm)) {
399 spin_unlock(&kvm_vmid_lock);
400 return;
401 }
402
403 /* First user of a new VMID generation? */
404 if (unlikely(kvm_next_vmid == 0)) {
405 atomic64_inc(&kvm_vmid_gen);
406 kvm_next_vmid = 1;
407
408 /*
409 * On SMP we know no other CPUs can use this CPU's or each
410 * other's VMID after force_vm_exit returns since the
411 * kvm_vmid_lock blocks them from reentry to the guest.
412 */
413 force_vm_exit(cpu_all_mask);
414 /*
415 * Now broadcast TLB + ICACHE invalidation over the inner
416 * shareable domain to make sure all data structures are
417 * clean.
418 */
419 kvm_call_hyp(__kvm_flush_vm_context);
420 }
421
422 kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
423 kvm->arch.vmid = kvm_next_vmid;
424 kvm_next_vmid++;
425
426 /* update vttbr to be used with the new vmid */
427 pgd_phys = virt_to_phys(kvm->arch.pgd);
428 vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
429 kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
430 kvm->arch.vttbr |= vmid;
431
432 spin_unlock(&kvm_vmid_lock);
433}
434
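
The packing above assumes the LPAE VTTBR layout: the stage-2 pgd base in the
low bits (VTTBR_BADDR_MASK) and an 8-bit VMID in bits [55:48]. The kvm_arm.h
constants are expected to encode exactly that:

	/* Sketch of the constants assumed by update_vttbr(). */
	#define VTTBR_VMID_SHIFT	48
	#define VTTBR_VMID_MASK		(0xffULL << VTTBR_VMID_SHIFT)	/* VTTBR[55:48] */
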
435static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
436{
437 /* SVC called from Hyp mode should never get here */
438 kvm_debug("SVC called from Hyp mode shouldn't go here\n");
439 BUG();
440 return -EINVAL; /* Squash warning */
441}
442
443static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
444{
445 trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
446 vcpu->arch.hsr & HSR_HVC_IMM_MASK);
447
448 if (kvm_psci_call(vcpu))
449 return 1;
450
451 kvm_inject_undefined(vcpu);
452 return 1;
453}
454
455static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
456{
457 if (kvm_psci_call(vcpu))
458 return 1;
459
460 kvm_inject_undefined(vcpu);
461 return 1;
462}
463
464static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
465{
466 /* The hypervisor should never cause aborts */
467 kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
468 vcpu->arch.hxfar, vcpu->arch.hsr);
469 return -EFAULT;
470}
471
472static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
473{
474 /* This is either an error in the world-switch code or an external abort */
475 kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
476 vcpu->arch.hxfar, vcpu->arch.hsr);
477 return -EFAULT;
478}
479
480typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
481static exit_handle_fn arm_exit_handlers[] = {
482 [HSR_EC_WFI] = kvm_handle_wfi,
483 [HSR_EC_CP15_32] = kvm_handle_cp15_32,
484 [HSR_EC_CP15_64] = kvm_handle_cp15_64,
485 [HSR_EC_CP14_MR] = kvm_handle_cp14_access,
486 [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
487 [HSR_EC_CP14_64] = kvm_handle_cp14_access,
488 [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
489 [HSR_EC_CP10_ID] = kvm_handle_cp10_id,
490 [HSR_EC_SVC_HYP] = handle_svc_hyp,
491 [HSR_EC_HVC] = handle_hvc,
492 [HSR_EC_SMC] = handle_smc,
493 [HSR_EC_IABT] = kvm_handle_guest_abort,
494 [HSR_EC_IABT_HYP] = handle_pabt_hyp,
495 [HSR_EC_DABT] = kvm_handle_guest_abort,
496 [HSR_EC_DABT_HYP] = handle_dabt_hyp,
497};
498
499/*
500 * A conditional instruction is allowed to trap, even though it
501 * wouldn't be executed. So let's re-implement the hardware, in
502 * software!
503 */
504static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
505{
506 unsigned long cpsr, cond, insn;
507
508 /*
509 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
510 * catch undefined instructions, and then we won't get past
511 * the arm_exit_handlers test anyway.
512 */
513 BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
514
515 /* Top two bits non-zero? Unconditional. */
516 if (vcpu->arch.hsr >> 30)
517 return true;
518
519 cpsr = *vcpu_cpsr(vcpu);
520
521 /* Is condition field valid? */
522 if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
523 cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
524 else {
525 /* This can happen in Thumb mode: examine IT state. */
526 unsigned long it;
527
528 it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
529
530 /* it == 0 => unconditional. */
531 if (it == 0)
532 return true;
533
534 /* The cond for this insn works out as the top 4 bits. */
535 cond = (it >> 4);
536 }
537
538 /* Shift makes it look like an ARM-mode instruction */
539 insn = cond << 28;
540 return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
541}
542
543/*
544 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
545 * proper exit to QEMU.
546 */
547static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
548 int exception_index)
549{
550 unsigned long hsr_ec;
551
552 switch (exception_index) {
553 case ARM_EXCEPTION_IRQ:
554 return 1;
555 case ARM_EXCEPTION_UNDEFINED:
556 kvm_err("Undefined exception in Hyp mode at: %#08x\n",
557 vcpu->arch.hyp_pc);
558 BUG();
559 panic("KVM: Hypervisor undefined exception!\n");
560 case ARM_EXCEPTION_DATA_ABORT:
561 case ARM_EXCEPTION_PREF_ABORT:
562 case ARM_EXCEPTION_HVC:
563 hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
564
565 if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
566 || !arm_exit_handlers[hsr_ec]) {
567 kvm_err("Unkown exception class: %#08lx, "
568 "hsr: %#08x\n", hsr_ec,
569 (unsigned int)vcpu->arch.hsr);
570 BUG();
571 }
572
573 /*
574 * See ARM ARM B1.14.1: "Hyp traps on instructions
575 * that fail their condition code check"
576 */
577 if (!kvm_condition_valid(vcpu)) {
578 bool is_wide = vcpu->arch.hsr & HSR_IL;
579 kvm_skip_instr(vcpu, is_wide);
580 return 1;
581 }
582
583 return arm_exit_handlers[hsr_ec](vcpu, run);
584 default:
585 kvm_pr_unimpl("Unsupported exception type: %d",
586 exception_index);
587 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
588 return 0;
589 }
590}
591
592static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
593{
594 if (likely(vcpu->arch.has_run_once))
595 return 0;
596
597 vcpu->arch.has_run_once = true;
598
599 /*
600 * Handle the "start in power-off" case by calling into the
601 * PSCI code.
602 */
603 if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
604 *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
605 kvm_psci_call(vcpu);
606 }
607
608 return 0;
609}
610
611static void vcpu_pause(struct kvm_vcpu *vcpu)
612{
613 wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
614
615 wait_event_interruptible(*wq, !vcpu->arch.pause);
616}
617
618/**
619 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
620 * @vcpu: The VCPU pointer
621 * @run: The kvm_run structure pointer used for userspace state exchange
622 *
623 * This function is called through the VCPU_RUN ioctl called from user space. It
624 * will execute VM code in a loop until the time slice for the process is used
625 * or some emulation is needed from user space in which case the function will
626 * return with return value 0 and with the kvm_run structure filled in with the
627 * required data for the requested emulation.
628 */
629int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
630{
631 int ret;
632 sigset_t sigsaved;
633
634 /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
635 if (unlikely(vcpu->arch.target < 0))
636 return -ENOEXEC;
637
638 ret = kvm_vcpu_first_run_init(vcpu);
639 if (ret)
640 return ret;
641
642 if (run->exit_reason == KVM_EXIT_MMIO) {
643 ret = kvm_handle_mmio_return(vcpu, vcpu->run);
644 if (ret)
645 return ret;
646 }
647
648 if (vcpu->sigset_active)
649 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
650
651 ret = 1;
652 run->exit_reason = KVM_EXIT_UNKNOWN;
653 while (ret > 0) {
654 /*
655 * Check conditions before entering the guest
656 */
657 cond_resched();
658
659 update_vttbr(vcpu->kvm);
660
661 if (vcpu->arch.pause)
662 vcpu_pause(vcpu);
663
664 local_irq_disable();
665
666 /*
667 * Re-check atomic conditions
668 */
669 if (signal_pending(current)) {
670 ret = -EINTR;
671 run->exit_reason = KVM_EXIT_INTR;
672 }
673
674 if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
675 local_irq_enable();
676 continue;
677 }
678
679 /**************************************************************
680 * Enter the guest
681 */
682 trace_kvm_entry(*vcpu_pc(vcpu));
683 kvm_guest_enter();
684 vcpu->mode = IN_GUEST_MODE;
685
686 ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
687
688 vcpu->mode = OUTSIDE_GUEST_MODE;
689 vcpu->arch.last_pcpu = smp_processor_id();
690 kvm_guest_exit();
691 trace_kvm_exit(*vcpu_pc(vcpu));
692 /*
693 * We may have taken a host interrupt in HYP mode (ie
694 * while executing the guest). This interrupt is still
695 * pending, as we haven't serviced it yet!
696 *
697 * We're now back in SVC mode, with interrupts
698 * disabled. Enabling the interrupts now will have
699 * the effect of taking the interrupt again, in SVC
700 * mode this time.
701 */
702 local_irq_enable();
703
704 /*
705 * Back from guest
706 *************************************************************/
707
708 ret = handle_exit(vcpu, run, ret);
709 }
710
711 if (vcpu->sigset_active)
712 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
713 return ret;
714}
715
716static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
717{
718 int bit_index;
719 bool set;
720 unsigned long *ptr;
721
722 if (number == KVM_ARM_IRQ_CPU_IRQ)
723 bit_index = __ffs(HCR_VI);
724 else /* KVM_ARM_IRQ_CPU_FIQ */
725 bit_index = __ffs(HCR_VF);
726
727 ptr = (unsigned long *)&vcpu->arch.irq_lines;
728 if (level)
729 set = test_and_set_bit(bit_index, ptr);
730 else
731 set = test_and_clear_bit(bit_index, ptr);
732
733 /*
734 * If we didn't change anything, no need to wake up or kick other CPUs
735 */
736 if (set == level)
737 return 0;
738
739 /*
740 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
741 * trigger a world-switch round on the running physical CPU to set the
742 * virtual IRQ/FIQ fields in the HCR appropriately.
743 */
744 kvm_vcpu_kick(vcpu);
745
746 return 0;
747}
748
749int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level)
750{
751 u32 irq = irq_level->irq;
752 unsigned int irq_type, vcpu_idx, irq_num;
753 int nrcpus = atomic_read(&kvm->online_vcpus);
754 struct kvm_vcpu *vcpu = NULL;
755 bool level = irq_level->level;
756
757 irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
758 vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
759 irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
760
761 trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
762
763 if (irq_type != KVM_ARM_IRQ_TYPE_CPU)
764 return -EINVAL;
765
766 if (vcpu_idx >= nrcpus)
767 return -EINVAL;
768
769 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
770 if (!vcpu)
771 return -EINVAL;
772
773 if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
774 return -EINVAL;
775
776 return vcpu_interrupt_line(vcpu, irq_num, level);
777}
778
779long kvm_arch_vcpu_ioctl(struct file *filp,
780 unsigned int ioctl, unsigned long arg)
781{
782 struct kvm_vcpu *vcpu = filp->private_data;
783 void __user *argp = (void __user *)arg;
784
785 switch (ioctl) {
786 case KVM_ARM_VCPU_INIT: {
787 struct kvm_vcpu_init init;
788
789 if (copy_from_user(&init, argp, sizeof(init)))
790 return -EFAULT;
791
792 return kvm_vcpu_set_target(vcpu, &init);
793
794 }
795 case KVM_SET_ONE_REG:
796 case KVM_GET_ONE_REG: {
797 struct kvm_one_reg reg;
798 if (copy_from_user(&reg, argp, sizeof(reg)))
799 return -EFAULT;
800 if (ioctl == KVM_SET_ONE_REG)
801 return kvm_arm_set_reg(vcpu, &reg);
802 else
803 return kvm_arm_get_reg(vcpu, &reg);
804 }
805 case KVM_GET_REG_LIST: {
806 struct kvm_reg_list __user *user_list = argp;
807 struct kvm_reg_list reg_list;
808 unsigned n;
809
810 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
811 return -EFAULT;
812 n = reg_list.n;
813 reg_list.n = kvm_arm_num_regs(vcpu);
814 if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
815 return -EFAULT;
816 if (n < reg_list.n)
817 return -E2BIG;
818 return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
819 }
820 default:
821 return -EINVAL;
822 }
823}
824
825int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
826{
827 return -EINVAL;
828}
829
830long kvm_arch_vm_ioctl(struct file *filp,
831 unsigned int ioctl, unsigned long arg)
832{
833 return -EINVAL;
834}
835
836static void cpu_init_hyp_mode(void *vector)
837{
838 unsigned long long pgd_ptr;
839 unsigned long pgd_low, pgd_high;
840 unsigned long hyp_stack_ptr;
841 unsigned long stack_page;
842 unsigned long vector_ptr;
843
844 /* Switch from the HYP stub to our own HYP init vector */
845 __hyp_set_vectors((unsigned long)vector);
846
847 pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
848 pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
849 pgd_high = (pgd_ptr >> 32ULL);
850 stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
851 hyp_stack_ptr = stack_page + PAGE_SIZE;
852 vector_ptr = (unsigned long)__kvm_hyp_vector;
853
854 /*
855 * Call initialization code, and switch to the full blown
856 * HYP code. The init code doesn't need to preserve these registers as
857 * r1-r3 and r12 are already callee save according to the AAPCS.
858 * Note that we slightly misuse the prototype by casting pgd_low to
859 * a void *.
860 */
861 kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
862}
863
864/**
865 * Inits Hyp-mode on all online CPUs
866 */
867static int init_hyp_mode(void)
868{
869 phys_addr_t init_phys_addr;
870 int cpu;
871 int err = 0;
872
873 /*
874 * Allocate Hyp PGD and setup Hyp identity mapping
875 */
876 err = kvm_mmu_init();
877 if (err)
878 goto out_err;
879
880 /*
881 * It is probably enough to obtain the default on one
882 * CPU. It's unlikely to be different on the others.
883 */
884 hyp_default_vectors = __hyp_get_vectors();
885
886 /*
887 * Allocate stack pages for Hypervisor-mode
888 */
889 for_each_possible_cpu(cpu) {
890 unsigned long stack_page;
891
892 stack_page = __get_free_page(GFP_KERNEL);
893 if (!stack_page) {
894 err = -ENOMEM;
895 goto out_free_stack_pages;
896 }
897
898 per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
899 }
900
901 /*
902 * Execute the init code on each CPU.
903 *
904 * Note: The stack is not mapped yet, so don't do anything else than
905 * initializing the hypervisor mode on each CPU using a local stack
906 * space for temporary storage.
907 */
908 init_phys_addr = virt_to_phys(__kvm_hyp_init);
909 for_each_online_cpu(cpu) {
910 smp_call_function_single(cpu, cpu_init_hyp_mode,
911 (void *)(long)init_phys_addr, 1);
912 }
913
914 /*
915 * Unmap the identity mapping
916 */
917 kvm_clear_hyp_idmap();
918
919 /*
920 * Map the Hyp-code called directly from the host
921 */
922 err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
923 if (err) {
924 kvm_err("Cannot map world-switch code\n");
925 goto out_free_mappings;
926 }
927
928 /*
929 * Map the Hyp stack pages
930 */
931 for_each_possible_cpu(cpu) {
932 char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
933 err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
934
935 if (err) {
936 kvm_err("Cannot map hyp stack\n");
937 goto out_free_mappings;
938 }
939 }
940
941 /*
942 * Map the host VFP structures
943 */
944 kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
945 if (!kvm_host_vfp_state) {
946 err = -ENOMEM;
947 kvm_err("Cannot allocate host VFP state\n");
948 goto out_free_mappings;
949 }
950
951 for_each_possible_cpu(cpu) {
952 struct vfp_hard_struct *vfp;
953
954 vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
955 err = create_hyp_mappings(vfp, vfp + 1);
956
957 if (err) {
958 kvm_err("Cannot map host VFP state: %d\n", err);
959 goto out_free_vfp;
960 }
961 }
962
963 kvm_info("Hyp mode initialized successfully\n");
964 return 0;
965out_free_vfp:
966 free_percpu(kvm_host_vfp_state);
967out_free_mappings:
968 free_hyp_pmds();
969out_free_stack_pages:
970 for_each_possible_cpu(cpu)
971 free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
972out_err:
973 kvm_err("error initializing Hyp mode: %d\n", err);
974 return err;
975}
976
977/**
978 * Initialize Hyp-mode and memory mappings on all CPUs.
979 */
980int kvm_arch_init(void *opaque)
981{
982 int err;
983
984 if (!is_hyp_mode_available()) {
985 kvm_err("HYP mode not available\n");
986 return -ENODEV;
987 }
988
989 if (kvm_target_cpu() < 0) {
990 kvm_err("Target CPU not supported!\n");
991 return -ENODEV;
992 }
993
994 err = init_hyp_mode();
995 if (err)
996 goto out_err;
997
998 kvm_coproc_table_init();
999 return 0;
1000out_err:
1001 return err;
1002}
1003
1004/* NOP: Compiling as a module not supported */
1005void kvm_arch_exit(void)
1006{
1007}
1008
1009static int arm_init(void)
1010{
1011 int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1012 return rc;
1013}
1014
1015module_init(arm_init);
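
kvm_vm_ioctl_irq_line() above decodes a type/vcpu/number triple from the
32-bit irq field of struct kvm_irq_level. A sketch of how userspace would pack
that field for the per-vcpu lines, using the shift/mask names this series adds
to arch/arm/include/uapi/asm/kvm.h (treat the layout there as authoritative):

	#include <linux/kvm.h>

	static __u32 pack_arm_cpu_irq(unsigned int vcpu_idx, unsigned int irq_num)
	{
		/* irq_num is KVM_ARM_IRQ_CPU_IRQ or KVM_ARM_IRQ_CPU_FIQ. */
		return (KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT) |
		       (vcpu_idx << KVM_ARM_IRQ_VCPU_SHIFT) |
		       (irq_num << KVM_ARM_IRQ_NUM_SHIFT);
	}

The packed value goes in the irq field passed to the KVM_IRQ_LINE ioctl.
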
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
new file mode 100644
index 000000000000..d782638c7ec0
--- /dev/null
+++ b/arch/arm/kvm/coproc.c
@@ -0,0 +1,1046 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Authors: Rusty Russell <rusty@rustcorp.com.au>
4 * Christoffer Dall <c.dall@virtualopensystems.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 */
19#include <linux/mm.h>
20#include <linux/kvm_host.h>
21#include <linux/uaccess.h>
22#include <asm/kvm_arm.h>
23#include <asm/kvm_host.h>
24#include <asm/kvm_emulate.h>
25#include <asm/kvm_coproc.h>
26#include <asm/cacheflush.h>
27#include <asm/cputype.h>
28#include <trace/events/kvm.h>
29#include <asm/vfp.h>
30#include "../vfp/vfpinstr.h"
31
32#include "trace.h"
33#include "coproc.h"
34
35
36/******************************************************************************
37 * Co-processor emulation
38 *****************************************************************************/
39
40/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
41static u32 cache_levels;
42
43/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
44#define CSSELR_MAX 12
45
46int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
47{
48 kvm_inject_undefined(vcpu);
49 return 1;
50}
51
52int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
53{
54 /*
55 * We can get here, if the host has been built without VFPv3 support,
56 * but the guest attempted a floating point operation.
57 */
58 kvm_inject_undefined(vcpu);
59 return 1;
60}
61
62int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
63{
64 kvm_inject_undefined(vcpu);
65 return 1;
66}
67
68int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
69{
70 kvm_inject_undefined(vcpu);
71 return 1;
72}
73
74/* See note at ARM ARM B1.14.4 */
75static bool access_dcsw(struct kvm_vcpu *vcpu,
76 const struct coproc_params *p,
77 const struct coproc_reg *r)
78{
79	u32 val;
80	int cpu;
81
82	if (!p->is_write)
83		return read_from_write_only(vcpu, p);
84
85	cpu = get_cpu();
86
87 cpumask_setall(&vcpu->arch.require_dcache_flush);
88 cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
89
90 /* If we were already preempted, take the long way around */
91 if (cpu != vcpu->arch.last_pcpu) {
92 flush_cache_all();
93 goto done;
94 }
95
96 val = *vcpu_reg(vcpu, p->Rt1);
97
98 switch (p->CRm) {
99 case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
100 case 14: /* DCCISW */
101 asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
102 break;
103
104 case 10: /* DCCSW */
105 asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
106 break;
107 }
108
109done:
110 put_cpu();
111
112 return true;
113}
114
115/*
116 * We could trap ID_DFR0 and tell the guest we don't support performance
117 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
118 * NAKed, so it will read the PMCR anyway.
119 *
120 * Therefore we tell the guest we have 0 counters. Unfortunately, we
121 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
122 * all PM registers, so at least the guest kernel doesn't crash.
123 */
124static bool pm_fake(struct kvm_vcpu *vcpu,
125 const struct coproc_params *p,
126 const struct coproc_reg *r)
127{
128 if (p->is_write)
129 return ignore_write(vcpu, p);
130 else
131 return read_zero(vcpu, p);
132}
133
134#define access_pmcr pm_fake
135#define access_pmcntenset pm_fake
136#define access_pmcntenclr pm_fake
137#define access_pmovsr pm_fake
138#define access_pmselr pm_fake
139#define access_pmceid0 pm_fake
140#define access_pmceid1 pm_fake
141#define access_pmccntr pm_fake
142#define access_pmxevtyper pm_fake
143#define access_pmxevcntr pm_fake
144#define access_pmuserenr pm_fake
145#define access_pmintenset pm_fake
146#define access_pmintenclr pm_fake
147
148/* Architected CP15 registers.
149 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2
150 */
151static const struct coproc_reg cp15_regs[] = {
152 /* CSSELR: swapped by interrupt.S. */
153 { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
154 NULL, reset_unknown, c0_CSSELR },
155
156 /* TTBR0/TTBR1: swapped by interrupt.S. */
157 { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
158 { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
159
160 /* TTBCR: swapped by interrupt.S. */
161 { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
162 NULL, reset_val, c2_TTBCR, 0x00000000 },
163
164 /* DACR: swapped by interrupt.S. */
165 { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
166 NULL, reset_unknown, c3_DACR },
167
168 /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
169 { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
170 NULL, reset_unknown, c5_DFSR },
171 { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
172 NULL, reset_unknown, c5_IFSR },
173 { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
174 NULL, reset_unknown, c5_ADFSR },
175 { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
176 NULL, reset_unknown, c5_AIFSR },
177
178 /* DFAR/IFAR: swapped by interrupt.S. */
179 { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
180 NULL, reset_unknown, c6_DFAR },
181 { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
182 NULL, reset_unknown, c6_IFAR },
183 /*
184 * DC{C,I,CI}SW operations:
185 */
186 { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
187 { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
188 { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
189 /*
190 * Dummy performance monitor implementation.
191 */
192 { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
193 { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
194 { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
195 { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
196 { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
197 { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
198 { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
199 { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
200 { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
201 { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
202 { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
203 { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
204 { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},
205
206 /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
207 { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
208 NULL, reset_unknown, c10_PRRR},
209 { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
210 NULL, reset_unknown, c10_NMRR},
211
212 /* VBAR: swapped by interrupt.S. */
213 { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
214 NULL, reset_val, c12_VBAR, 0x00000000 },
215
216 /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
217 { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
218 NULL, reset_val, c13_CID, 0x00000000 },
219 { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
220 NULL, reset_unknown, c13_TID_URW },
221 { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
222 NULL, reset_unknown, c13_TID_URO },
223 { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
224 NULL, reset_unknown, c13_TID_PRIV },
225};
226
227/* Target specific emulation tables */
228static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
229
230void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
231{
232 target_tables[table->target] = table;
233}
234
235/* Get specific register table for this target. */
236static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
237{
238 struct kvm_coproc_target_table *table;
239
240 table = target_tables[target];
241 *num = table->num;
242 return table->table;
243}
244
245static const struct coproc_reg *find_reg(const struct coproc_params *params,
246 const struct coproc_reg table[],
247 unsigned int num)
248{
249 unsigned int i;
250
251 for (i = 0; i < num; i++) {
252 const struct coproc_reg *r = &table[i];
253
254 if (params->is_64bit != r->is_64)
255 continue;
256 if (params->CRn != r->CRn)
257 continue;
258 if (params->CRm != r->CRm)
259 continue;
260 if (params->Op1 != r->Op1)
261 continue;
262 if (params->Op2 != r->Op2)
263 continue;
264
265 return r;
266 }
267 return NULL;
268}
269
270static int emulate_cp15(struct kvm_vcpu *vcpu,
271 const struct coproc_params *params)
272{
273 size_t num;
274 const struct coproc_reg *table, *r;
275
276 trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
277 params->CRm, params->Op2, params->is_write);
278
279 table = get_target_table(vcpu->arch.target, &num);
280
281 /* Search target-specific then generic table. */
282 r = find_reg(params, table, num);
283 if (!r)
284 r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
285
286 if (likely(r)) {
287 /* If we don't have an accessor, we should never get here! */
288 BUG_ON(!r->access);
289
290 if (likely(r->access(vcpu, params, r))) {
291 /* Skip instruction, since it was emulated */
292 kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
293 return 1;
294 }
295 /* If access function fails, it should complain. */
296 } else {
297 kvm_err("Unsupported guest CP15 access at: %08x\n",
298 *vcpu_pc(vcpu));
299 print_cp_instr(params);
300 }
301 kvm_inject_undefined(vcpu);
302 return 1;
303}
304
305/**
306 * kvm_handle_cp15_64 - handles an mrrc/mcrr trap on a guest CP15 access
307 * @vcpu: The VCPU pointer
308 * @run: The kvm_run struct
309 */
310int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
311{
312 struct coproc_params params;
313
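	/*
	 * HSR ISS layout for a 64-bit (mcrr/mrrc) CP15 trap:
	 * Opc1[19:16], Rt2[14:10], Rt1[9:5], CRm[4:1], direction in
	 * bit 0 (0 = write, i.e. mcrr).
	 */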
314 params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
315 params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
316 params.is_write = ((vcpu->arch.hsr & 1) == 0);
317 params.is_64bit = true;
318
319 params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
320 params.Op2 = 0;
321 params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
322 params.CRn = 0;
323
324 return emulate_cp15(vcpu, &params);
325}
326
327static void reset_coproc_regs(struct kvm_vcpu *vcpu,
328 const struct coproc_reg *table, size_t num)
329{
330 unsigned long i;
331
332 for (i = 0; i < num; i++)
333 if (table[i].reset)
334 table[i].reset(vcpu, &table[i]);
335}
336
337/**
338 * kvm_handle_cp15_32 - handles an mrc/mcr trap on a guest CP15 access
339 * @vcpu: The VCPU pointer
340 * @run: The kvm_run struct
341 */
342int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
343{
344 struct coproc_params params;
345
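	/*
	 * HSR ISS layout for a 32-bit (mcr/mrc) CP15 trap:
	 * Opc2[19:17], Opc1[16:14], CRn[13:10], Rt1[9:5], CRm[4:1],
	 * direction in bit 0 (0 = write, i.e. mcr).
	 */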
346 params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
347 params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
348 params.is_write = ((vcpu->arch.hsr & 1) == 0);
349 params.is_64bit = false;
350
351 params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
352 params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
353 params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
354 params.Rt2 = 0;
355
356 return emulate_cp15(vcpu, &params);
357}
358
359/******************************************************************************
360 * Userspace API
361 *****************************************************************************/
362
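/*
 * A hypothetical userspace sketch of how the interface below is driven:
 * the register IDs produced by kvm_arm_copy_coproc_indices() feed straight
 * into the generic ONE_REG ioctls, e.g.:
 *
 *	u32 val;
 *	struct kvm_one_reg reg = {
 *		.id   = id,	(an index obtained from KVM_GET_REG_LIST)
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */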
363static bool index_to_params(u64 id, struct coproc_params *params)
364{
365 switch (id & KVM_REG_SIZE_MASK) {
366 case KVM_REG_SIZE_U32:
367 /* Any unused index bits means it's not valid. */
368 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
369 | KVM_REG_ARM_COPROC_MASK
370 | KVM_REG_ARM_32_CRN_MASK
371 | KVM_REG_ARM_CRM_MASK
372 | KVM_REG_ARM_OPC1_MASK
373 | KVM_REG_ARM_32_OPC2_MASK))
374 return false;
375
376 params->is_64bit = false;
377 params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
378 >> KVM_REG_ARM_32_CRN_SHIFT);
379 params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
380 >> KVM_REG_ARM_CRM_SHIFT);
381 params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
382 >> KVM_REG_ARM_OPC1_SHIFT);
383 params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
384 >> KVM_REG_ARM_32_OPC2_SHIFT);
385 return true;
386 case KVM_REG_SIZE_U64:
387 /* Any unused index bits means it's not valid. */
388 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
389 | KVM_REG_ARM_COPROC_MASK
390 | KVM_REG_ARM_CRM_MASK
391 | KVM_REG_ARM_OPC1_MASK))
392 return false;
393 params->is_64bit = true;
394 params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
395 >> KVM_REG_ARM_CRM_SHIFT);
396 params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
397 >> KVM_REG_ARM_OPC1_SHIFT);
398 params->Op2 = 0;
399 params->CRn = 0;
400 return true;
401 default:
402 return false;
403 }
404}
405
406/* Decode an index value, and find the cp15 coproc_reg entry. */
407static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
408 u64 id)
409{
410 size_t num;
411 const struct coproc_reg *table, *r;
412 struct coproc_params params;
413
414 /* We only do cp15 for now. */
415 if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
416 return NULL;
417
418 if (!index_to_params(id, &params))
419 return NULL;
420
421 table = get_target_table(vcpu->arch.target, &num);
422 r = find_reg(&params, table, num);
423 if (!r)
424 r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));
425
426 /* Not saved in the cp15 array? */
427 if (r && !r->reg)
428 r = NULL;
429
430 return r;
431}
432
433/*
434 * These are the invariant cp15 registers: we let the guest see the host
435 * versions of these, so they're part of the guest state.
436 *
437 * A future CPU may provide a mechanism to present different values to
438 * the guest, or a future kvm may trap them.
439 */
440/* Unfortunately, there's no register-argument for mrc, so generate. */
441#define FUNCTION_FOR32(crn, crm, op1, op2, name) \
442 static void get_##name(struct kvm_vcpu *v, \
443 const struct coproc_reg *r) \
444 { \
445 u32 val; \
446 \
447 asm volatile("mrc p15, " __stringify(op1) \
448 ", %0, c" __stringify(crn) \
449 ", c" __stringify(crm) \
450 ", " __stringify(op2) "\n" : "=r" (val)); \
451 ((struct coproc_reg *)r)->val = val; \
452 }
453
454FUNCTION_FOR32(0, 0, 0, 0, MIDR)
455FUNCTION_FOR32(0, 0, 0, 1, CTR)
456FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
457FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
458FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
459FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
460FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
461FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
462FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
463FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
464FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
465FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
466FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
467FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
468FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
469FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
470FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
471FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
472FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
473FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
474FUNCTION_FOR32(0, 0, 1, 7, AIDR)
475
476/* ->val is filled in by kvm_invariant_coproc_table_init() */
477static struct coproc_reg invariant_cp15[] = {
478 { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
479 { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
480 { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
481 { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
482 { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },
483
484 { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
485 { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
486 { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
487 { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
488 { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
489 { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
490 { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
491 { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },
492
493 { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
494 { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
495 { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
496 { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
497 { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
498 { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
499
500 { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
501 { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
502};
503
504static int reg_from_user(void *val, const void __user *uaddr, u64 id)
505{
506 /* This Just Works because we are little endian. */
507 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
508 return -EFAULT;
509 return 0;
510}
511
512static int reg_to_user(void __user *uaddr, const void *val, u64 id)
513{
514 /* This Just Works because we are little endian. */
515 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
516 return -EFAULT;
517 return 0;
518}
519
520static int get_invariant_cp15(u64 id, void __user *uaddr)
521{
522 struct coproc_params params;
523 const struct coproc_reg *r;
524
525 if (!index_to_params(id, &params))
526 return -ENOENT;
527
528 r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
529 if (!r)
530 return -ENOENT;
531
532 return reg_to_user(uaddr, &r->val, id);
533}
534
535static int set_invariant_cp15(u64 id, void __user *uaddr)
536{
537 struct coproc_params params;
538 const struct coproc_reg *r;
539 int err;
540 u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
541
542 if (!index_to_params(id, &params))
543 return -ENOENT;
544 r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
545 if (!r)
546 return -ENOENT;
547
548 err = reg_from_user(&val, uaddr, id);
549 if (err)
550 return err;
551
552 /* This is what we mean by invariant: you can't change it. */
553 if (r->val != val)
554 return -EINVAL;
555
556 return 0;
557}
558
559static bool is_valid_cache(u32 val)
560{
561 u32 level, ctype;
562
563 if (val >= CSSELR_MAX)
564		return false;
565
566 /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
567 level = (val >> 1);
568 ctype = (cache_levels >> (level * 3)) & 7;
569
570 switch (ctype) {
571 case 0: /* No cache */
572 return false;
573 case 1: /* Instruction cache only */
574 return (val & 1);
575 case 2: /* Data cache only */
576 case 4: /* Unified cache */
577 return !(val & 1);
578 case 3: /* Separate instruction and data caches */
579 return true;
580 default: /* Reserved: we can't know instruction or data. */
581 return false;
582 }
583}
584
585/* Which cache CCSIDR represents depends on CSSELR value. */
586static u32 get_ccsidr(u32 csselr)
587{
588 u32 ccsidr;
589
590	/* Make sure no one else changes CSSELR during this! */
591 local_irq_disable();
592 /* Put value into CSSELR */
593 asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
594 isb();
595 /* Read result out of CCSIDR */
596 asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
597 local_irq_enable();
598
599 return ccsidr;
600}
601
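/*
 * Demux register IDs encode a CSSELR value in their VAL field, exposing one
 * 32-bit CCSIDR pseudo-register per valid cache level/type combination.
 */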
602static int demux_c15_get(u64 id, void __user *uaddr)
603{
604 u32 val;
605 u32 __user *uval = uaddr;
606
607 /* Fail if we have unknown bits set. */
608 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
609 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
610 return -ENOENT;
611
612 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
613 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
614 if (KVM_REG_SIZE(id) != 4)
615 return -ENOENT;
616 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
617 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
618 if (!is_valid_cache(val))
619 return -ENOENT;
620
621 return put_user(get_ccsidr(val), uval);
622 default:
623 return -ENOENT;
624 }
625}
626
627static int demux_c15_set(u64 id, void __user *uaddr)
628{
629 u32 val, newval;
630 u32 __user *uval = uaddr;
631
632 /* Fail if we have unknown bits set. */
633 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
634 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
635 return -ENOENT;
636
637 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
638 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
639 if (KVM_REG_SIZE(id) != 4)
640 return -ENOENT;
641 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
642 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
643 if (!is_valid_cache(val))
644 return -ENOENT;
645
646 if (get_user(newval, uval))
647 return -EFAULT;
648
649 /* This is also invariant: you can't change it. */
650 if (newval != get_ccsidr(val))
651 return -EINVAL;
652 return 0;
653 default:
654 return -ENOENT;
655 }
656}
657
658#ifdef CONFIG_VFPv3
659static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
660 KVM_REG_ARM_VFP_FPSCR,
661 KVM_REG_ARM_VFP_FPINST,
662 KVM_REG_ARM_VFP_FPINST2,
663 KVM_REG_ARM_VFP_MVFR0,
664 KVM_REG_ARM_VFP_MVFR1,
665 KVM_REG_ARM_VFP_FPSID };
666
667static unsigned int num_fp_regs(void)
668{
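	/* MVFR0 A_SIMD == 2 means 32 double registers (VFPv3-D32), else 16. */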
669 if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
670 return 32;
671 else
672 return 16;
673}
674
675static unsigned int num_vfp_regs(void)
676{
677 /* Normal FP regs + control regs. */
678 return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
679}
680
681static int copy_vfp_regids(u64 __user *uindices)
682{
683 unsigned int i;
684 const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
685 const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
686
687 for (i = 0; i < num_fp_regs(); i++) {
688 if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
689 uindices))
690 return -EFAULT;
691 uindices++;
692 }
693
694 for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
695 if (put_user(u32reg | vfp_sysregs[i], uindices))
696 return -EFAULT;
697 uindices++;
698 }
699
700 return num_vfp_regs();
701}
702
703static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
704{
705 u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
706 u32 val;
707
708 /* Fail if we have unknown bits set. */
709 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
710 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
711 return -ENOENT;
712
713 if (vfpid < num_fp_regs()) {
714 if (KVM_REG_SIZE(id) != 8)
715 return -ENOENT;
716 return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid],
717 id);
718 }
719
720 /* FP control registers are all 32 bit. */
721 if (KVM_REG_SIZE(id) != 4)
722 return -ENOENT;
723
724 switch (vfpid) {
725 case KVM_REG_ARM_VFP_FPEXC:
726 return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id);
727 case KVM_REG_ARM_VFP_FPSCR:
728 return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id);
729 case KVM_REG_ARM_VFP_FPINST:
730 return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id);
731 case KVM_REG_ARM_VFP_FPINST2:
732 return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id);
733 case KVM_REG_ARM_VFP_MVFR0:
734 val = fmrx(MVFR0);
735 return reg_to_user(uaddr, &val, id);
736 case KVM_REG_ARM_VFP_MVFR1:
737 val = fmrx(MVFR1);
738 return reg_to_user(uaddr, &val, id);
739 case KVM_REG_ARM_VFP_FPSID:
740 val = fmrx(FPSID);
741 return reg_to_user(uaddr, &val, id);
742 default:
743 return -ENOENT;
744 }
745}
746
747static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
748{
749 u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
750 u32 val;
751
752 /* Fail if we have unknown bits set. */
753 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
754 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
755 return -ENOENT;
756
757 if (vfpid < num_fp_regs()) {
758 if (KVM_REG_SIZE(id) != 8)
759 return -ENOENT;
760 return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid],
761 uaddr, id);
762 }
763
764 /* FP control registers are all 32 bit. */
765 if (KVM_REG_SIZE(id) != 4)
766 return -ENOENT;
767
768 switch (vfpid) {
769 case KVM_REG_ARM_VFP_FPEXC:
770 return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id);
771 case KVM_REG_ARM_VFP_FPSCR:
772 return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id);
773 case KVM_REG_ARM_VFP_FPINST:
774 return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id);
775 case KVM_REG_ARM_VFP_FPINST2:
776 return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id);
777 /* These are invariant. */
778 case KVM_REG_ARM_VFP_MVFR0:
779 if (reg_from_user(&val, uaddr, id))
780 return -EFAULT;
781 if (val != fmrx(MVFR0))
782 return -EINVAL;
783 return 0;
784 case KVM_REG_ARM_VFP_MVFR1:
785 if (reg_from_user(&val, uaddr, id))
786 return -EFAULT;
787 if (val != fmrx(MVFR1))
788 return -EINVAL;
789 return 0;
790 case KVM_REG_ARM_VFP_FPSID:
791 if (reg_from_user(&val, uaddr, id))
792 return -EFAULT;
793 if (val != fmrx(FPSID))
794 return -EINVAL;
795 return 0;
796 default:
797 return -ENOENT;
798 }
799}
800#else /* !CONFIG_VFPv3 */
801static unsigned int num_vfp_regs(void)
802{
803 return 0;
804}
805
806static int copy_vfp_regids(u64 __user *uindices)
807{
808 return 0;
809}
810
811static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
812{
813 return -ENOENT;
814}
815
816static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
817{
818 return -ENOENT;
819}
820#endif /* !CONFIG_VFPv3 */
821
822int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
823{
824 const struct coproc_reg *r;
825 void __user *uaddr = (void __user *)(long)reg->addr;
826
827 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
828 return demux_c15_get(reg->id, uaddr);
829
830 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
831 return vfp_get_reg(vcpu, reg->id, uaddr);
832
833 r = index_to_coproc_reg(vcpu, reg->id);
834 if (!r)
835 return get_invariant_cp15(reg->id, uaddr);
836
837 /* Note: copies two regs if size is 64 bit. */
838 return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
839}
840
841int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
842{
843 const struct coproc_reg *r;
844 void __user *uaddr = (void __user *)(long)reg->addr;
845
846 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
847 return demux_c15_set(reg->id, uaddr);
848
849 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
850 return vfp_set_reg(vcpu, reg->id, uaddr);
851
852 r = index_to_coproc_reg(vcpu, reg->id);
853 if (!r)
854 return set_invariant_cp15(reg->id, uaddr);
855
856 /* Note: copies two regs if size is 64 bit */
857 return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
858}
859
860static unsigned int num_demux_regs(void)
861{
862 unsigned int i, count = 0;
863
864 for (i = 0; i < CSSELR_MAX; i++)
865 if (is_valid_cache(i))
866 count++;
867
868 return count;
869}
870
871static int write_demux_regids(u64 __user *uindices)
872{
873 u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
874 unsigned int i;
875
876 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
877 for (i = 0; i < CSSELR_MAX; i++) {
878 if (!is_valid_cache(i))
879 continue;
880 if (put_user(val | i, uindices))
881 return -EFAULT;
882 uindices++;
883 }
884 return 0;
885}
886
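/*
 * Pack a coproc_reg into its 64-bit ONE_REG index; the inverse of
 * index_to_params() above.
 */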
887static u64 cp15_to_index(const struct coproc_reg *reg)
888{
889 u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
890 if (reg->is_64) {
891 val |= KVM_REG_SIZE_U64;
892 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
893 val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
894 } else {
895 val |= KVM_REG_SIZE_U32;
896 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
897 val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
898 val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
899 val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
900 }
901 return val;
902}
903
904static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
905{
906 if (!*uind)
907 return true;
908
909 if (put_user(cp15_to_index(reg), *uind))
910 return false;
911
912 (*uind)++;
913 return true;
914}
915
916/* Assumed ordered tables, see kvm_coproc_table_init. */
917static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
918{
919 const struct coproc_reg *i1, *i2, *end1, *end2;
920 unsigned int total = 0;
921 size_t num;
922
923 /* We check for duplicates here, to allow arch-specific overrides. */
924 i1 = get_target_table(vcpu->arch.target, &num);
925 end1 = i1 + num;
926 i2 = cp15_regs;
927 end2 = cp15_regs + ARRAY_SIZE(cp15_regs);
928
929 BUG_ON(i1 == end1 || i2 == end2);
930
931 /* Walk carefully, as both tables may refer to the same register. */
932 while (i1 || i2) {
933 int cmp = cmp_reg(i1, i2);
934 /* target-specific overrides generic entry. */
935 if (cmp <= 0) {
936 /* Ignore registers we trap but don't save. */
937 if (i1->reg) {
938 if (!copy_reg_to_user(i1, &uind))
939 return -EFAULT;
940 total++;
941 }
942 } else {
943 /* Ignore registers we trap but don't save. */
944 if (i2->reg) {
945 if (!copy_reg_to_user(i2, &uind))
946 return -EFAULT;
947 total++;
948 }
949 }
950
951 if (cmp <= 0 && ++i1 == end1)
952 i1 = NULL;
953 if (cmp >= 0 && ++i2 == end2)
954 i2 = NULL;
955 }
956 return total;
957}
958
959unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
960{
961 return ARRAY_SIZE(invariant_cp15)
962 + num_demux_regs()
963 + num_vfp_regs()
964 + walk_cp15(vcpu, (u64 __user *)NULL);
965}
966
967int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
968{
969 unsigned int i;
970 int err;
971
972 /* Then give them all the invariant registers' indices. */
973 for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
974 if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
975 return -EFAULT;
976 uindices++;
977 }
978
979 err = walk_cp15(vcpu, uindices);
980 if (err < 0)
981 return err;
982 uindices += err;
983
984 err = copy_vfp_regids(uindices);
985 if (err < 0)
986 return err;
987 uindices += err;
988
989 return write_demux_regids(uindices);
990}
991
992void kvm_coproc_table_init(void)
993{
994 unsigned int i;
995
996 /* Make sure tables are unique and in order. */
997 for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
998 BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);
999
1000 /* We abuse the reset function to overwrite the table itself. */
1001 for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
1002 invariant_cp15[i].reset(NULL, &invariant_cp15[i]);
1003
1004 /*
1005 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
1006 *
1007 * If software reads the Cache Type fields from Ctype1
1008 * upwards, once it has seen a value of 0b000, no caches
1009 * exist at further-out levels of the hierarchy. So, for
1010 * example, if Ctype3 is the first Cache Type field with a
1011 * value of 0b000, the values of Ctype4 to Ctype7 must be
1012 * ignored.
1013 */
1014 asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
1015 for (i = 0; i < 7; i++)
1016 if (((cache_levels >> (i*3)) & 7) == 0)
1017 break;
1018 /* Clear all higher bits. */
1019 cache_levels &= (1 << (i*3))-1;
1020}
1021
1022/**
1023 * kvm_reset_coprocs - sets cp15 registers to reset value
1024 * @vcpu: The VCPU pointer
1025 *
1026 * This function finds the right table above and sets the registers on the
1027 * virtual CPU struct to their architecturally defined reset values.
1028 */
1029void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
1030{
1031 size_t num;
1032 const struct coproc_reg *table;
1033
1034	/* Catch someone adding a register without a reset entry. */
1035 memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));
1036
1037 /* Generic chip reset first (so target could override). */
1038 reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
1039
1040 table = get_target_table(vcpu->arch.target, &num);
1041 reset_coproc_regs(vcpu, table, num);
1042
1043 for (num = 1; num < NR_CP15_REGS; num++)
1044 if (vcpu->arch.cp15[num] == 0x42424242)
1045 panic("Didn't reset vcpu->arch.cp15[%zi]", num);
1046}
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
new file mode 100644
index 000000000000..992adfafa2ff
--- /dev/null
+++ b/arch/arm/kvm/coproc.h
@@ -0,0 +1,153 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_COPROC_LOCAL_H__
20#define __ARM_KVM_COPROC_LOCAL_H__
21
22struct coproc_params {
23 unsigned long CRn;
24 unsigned long CRm;
25 unsigned long Op1;
26 unsigned long Op2;
27 unsigned long Rt1;
28 unsigned long Rt2;
29 bool is_64bit;
30 bool is_write;
31};
32
33struct coproc_reg {
34 /* MRC/MCR/MRRC/MCRR instruction which accesses it. */
35 unsigned long CRn;
36 unsigned long CRm;
37 unsigned long Op1;
38 unsigned long Op2;
39
40 bool is_64;
41
42 /* Trapped access from guest, if non-NULL. */
43 bool (*access)(struct kvm_vcpu *,
44 const struct coproc_params *,
45 const struct coproc_reg *);
46
47 /* Initialization for vcpu. */
48 void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);
49
50 /* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */
51 unsigned long reg;
52
53 /* Value (usually reset value) */
54 u64 val;
55};
56
57static inline void print_cp_instr(const struct coproc_params *p)
58{
59 /* Look, we even formatted it for you to paste into the table! */
60 if (p->is_64bit) {
61 kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n",
62 p->CRm, p->Op1, p->is_write ? "write" : "read");
63 } else {
64 kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
65 " func_%s },\n",
66 p->CRn, p->CRm, p->Op1, p->Op2,
67 p->is_write ? "write" : "read");
68 }
69}
70
71static inline bool ignore_write(struct kvm_vcpu *vcpu,
72 const struct coproc_params *p)
73{
74 return true;
75}
76
77static inline bool read_zero(struct kvm_vcpu *vcpu,
78 const struct coproc_params *p)
79{
80 *vcpu_reg(vcpu, p->Rt1) = 0;
81 return true;
82}
83
84static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
85 const struct coproc_params *params)
86{
87 kvm_debug("CP15 write to read-only register at: %08x\n",
88 *vcpu_pc(vcpu));
89 print_cp_instr(params);
90 return false;
91}
92
93static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
94 const struct coproc_params *params)
95{
96	kvm_debug("CP15 read from write-only register at: %08x\n",
97 *vcpu_pc(vcpu));
98 print_cp_instr(params);
99 return false;
100}
101
102/* Reset functions */
103static inline void reset_unknown(struct kvm_vcpu *vcpu,
104 const struct coproc_reg *r)
105{
106 BUG_ON(!r->reg);
107 BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
108 vcpu->arch.cp15[r->reg] = 0xdecafbad;
109}
110
111static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
112{
113 BUG_ON(!r->reg);
114 BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
115 vcpu->arch.cp15[r->reg] = r->val;
116}
117
118static inline void reset_unknown64(struct kvm_vcpu *vcpu,
119 const struct coproc_reg *r)
120{
121 BUG_ON(!r->reg);
122 BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15));
123
124 vcpu->arch.cp15[r->reg] = 0xdecafbad;
125 vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee;
126}
127
128static inline int cmp_reg(const struct coproc_reg *i1,
129 const struct coproc_reg *i2)
130{
131 BUG_ON(i1 == i2);
132 if (!i1)
133 return 1;
134 else if (!i2)
135 return -1;
136 if (i1->CRn != i2->CRn)
137 return i1->CRn - i2->CRn;
138 if (i1->CRm != i2->CRm)
139 return i1->CRm - i2->CRm;
140 if (i1->Op1 != i2->Op1)
141 return i1->Op1 - i2->Op1;
142 return i1->Op2 - i2->Op2;
143}
144
145
146#define CRn(_x) .CRn = _x
147#define CRm(_x) .CRm = _x
148#define Op1(_x) .Op1 = _x
149#define Op2(_x) .Op2 = _x
150#define is64 .is_64 = true
151#define is32 .is_64 = false
152
153#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
new file mode 100644
index 000000000000..685063a6d0cf
--- /dev/null
+++ b/arch/arm/kvm/coproc_a15.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Authors: Rusty Russell <rusty@rustcorp.com.au>
4 * Christoffer Dall <c.dall@virtualopensystems.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 */
19#include <linux/kvm_host.h>
20#include <asm/cputype.h>
21#include <asm/kvm_arm.h>
22#include <asm/kvm_host.h>
23#include <asm/kvm_emulate.h>
24#include <asm/kvm_coproc.h>
25#include <linux/init.h>
26
27static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
28{
29 /*
30 * Compute guest MPIDR:
31 * (Even if we present only one VCPU to the guest on an SMP
32 * host we don't set the U bit in the MPIDR, or vice versa, as
33 * revealing the underlying hardware properties is likely to
34 * be the best choice).
35 */
36 vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK)
37 | (vcpu->vcpu_id & MPIDR_LEVEL_MASK);
38}
39
40#include "coproc.h"
41
42/* A15 TRM 4.3.28: RO WI */
43static bool access_actlr(struct kvm_vcpu *vcpu,
44 const struct coproc_params *p,
45 const struct coproc_reg *r)
46{
47 if (p->is_write)
48 return ignore_write(vcpu, p);
49
50 *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
51 return true;
52}
53
54/* A15 TRM 4.3.60: R/O. */
55static bool access_cbar(struct kvm_vcpu *vcpu,
56 const struct coproc_params *p,
57 const struct coproc_reg *r)
58{
59 if (p->is_write)
60 return write_to_read_only(vcpu, p);
61 return read_zero(vcpu, p);
62}
63
64/* A15 TRM 4.3.48: R/O WI. */
65static bool access_l2ctlr(struct kvm_vcpu *vcpu,
66 const struct coproc_params *p,
67 const struct coproc_reg *r)
68{
69 if (p->is_write)
70 return ignore_write(vcpu, p);
71
72 *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
73 return true;
74}
75
76static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
77{
78 u32 l2ctlr, ncores;
79
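	/* L2CTLR bits [25:24] advertise the number of cores minus one. */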
80 asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
81 l2ctlr &= ~(3 << 24);
82 ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
83 l2ctlr |= (ncores & 3) << 24;
84
85 vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
86}
87
88static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
89{
90 u32 actlr;
91
92 /* ACTLR contains SMP bit: make sure you create all cpus first! */
93 asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
94 /* Make the SMP bit consistent with the guest configuration */
95 if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
96 actlr |= 1U << 6;
97 else
98 actlr &= ~(1U << 6);
99
100 vcpu->arch.cp15[c1_ACTLR] = actlr;
101}
102
103/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */
104static bool access_l2ectlr(struct kvm_vcpu *vcpu,
105 const struct coproc_params *p,
106 const struct coproc_reg *r)
107{
108 if (p->is_write)
109 return ignore_write(vcpu, p);
110
111 *vcpu_reg(vcpu, p->Rt1) = 0;
112 return true;
113}
114
115/*
116 * A15-specific CP15 registers.
117 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2
118 */
119static const struct coproc_reg a15_regs[] = {
120 /* MPIDR: we use VMPIDR for guest access. */
121 { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
122 NULL, reset_mpidr, c0_MPIDR },
123
124 /* SCTLR: swapped by interrupt.S. */
125 { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
126 NULL, reset_val, c1_SCTLR, 0x00C50078 },
127 /* ACTLR: trapped by HCR.TAC bit. */
128 { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
129 access_actlr, reset_actlr, c1_ACTLR },
130 /* CPACR: swapped by interrupt.S. */
131 { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
132 NULL, reset_val, c1_CPACR, 0x00000000 },
133
134 /*
135 * L2CTLR access (guest wants to know #CPUs).
136 */
137 { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
138 access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
139 { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
140
141 /* The Configuration Base Address Register. */
142 { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
143};
144
145static struct kvm_coproc_target_table a15_target_table = {
146 .target = KVM_ARM_TARGET_CORTEX_A15,
147 .table = a15_regs,
148 .num = ARRAY_SIZE(a15_regs),
149};
150
151static int __init coproc_a15_init(void)
152{
153 unsigned int i;
154
155 for (i = 1; i < ARRAY_SIZE(a15_regs); i++)
156 BUG_ON(cmp_reg(&a15_regs[i-1],
157 &a15_regs[i]) >= 0);
158
159 kvm_register_target_coproc_table(&a15_target_table);
160 return 0;
161}
162late_initcall(coproc_a15_init);
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
new file mode 100644
index 000000000000..d61450ac6665
--- /dev/null
+++ b/arch/arm/kvm/emulate.c
@@ -0,0 +1,373 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/mm.h>
20#include <linux/kvm_host.h>
21#include <asm/kvm_arm.h>
22#include <asm/kvm_emulate.h>
23#include <trace/events/kvm.h>
24
25#include "trace.h"
26
27#define VCPU_NR_MODES 6
28#define VCPU_REG_OFFSET_USR 0
29#define VCPU_REG_OFFSET_FIQ 1
30#define VCPU_REG_OFFSET_IRQ 2
31#define VCPU_REG_OFFSET_SVC 3
32#define VCPU_REG_OFFSET_ABT 4
33#define VCPU_REG_OFFSET_UND 5
34#define REG_OFFSET(_reg) \
35 (offsetof(struct kvm_regs, _reg) / sizeof(u32))
36
37#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])
38
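/*
 * Per-mode map from register number (r0-r14) to the word offset of its
 * banked copy inside struct kvm_regs: FIQ banks r8-r14, while the other
 * exception modes bank only r13/r14.
 */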
39static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
40 /* USR/SYS Registers */
41 [VCPU_REG_OFFSET_USR] = {
42 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
43 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
44 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
45 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
46 USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
47 },
48
49 /* FIQ Registers */
50 [VCPU_REG_OFFSET_FIQ] = {
51 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
52 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
53 USR_REG_OFFSET(6), USR_REG_OFFSET(7),
54 REG_OFFSET(fiq_regs[0]), /* r8 */
55 REG_OFFSET(fiq_regs[1]), /* r9 */
56 REG_OFFSET(fiq_regs[2]), /* r10 */
57 REG_OFFSET(fiq_regs[3]), /* r11 */
58 REG_OFFSET(fiq_regs[4]), /* r12 */
59 REG_OFFSET(fiq_regs[5]), /* r13 */
60 REG_OFFSET(fiq_regs[6]), /* r14 */
61 },
62
63 /* IRQ Registers */
64 [VCPU_REG_OFFSET_IRQ] = {
65 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
66 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
67 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
68 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
69 USR_REG_OFFSET(12),
70 REG_OFFSET(irq_regs[0]), /* r13 */
71 REG_OFFSET(irq_regs[1]), /* r14 */
72 },
73
74 /* SVC Registers */
75 [VCPU_REG_OFFSET_SVC] = {
76 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
77 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
78 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
79 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
80 USR_REG_OFFSET(12),
81 REG_OFFSET(svc_regs[0]), /* r13 */
82 REG_OFFSET(svc_regs[1]), /* r14 */
83 },
84
85 /* ABT Registers */
86 [VCPU_REG_OFFSET_ABT] = {
87 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
88 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
89 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
90 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
91 USR_REG_OFFSET(12),
92 REG_OFFSET(abt_regs[0]), /* r13 */
93 REG_OFFSET(abt_regs[1]), /* r14 */
94 },
95
96 /* UND Registers */
97 [VCPU_REG_OFFSET_UND] = {
98 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
99 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
100 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
101 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
102 USR_REG_OFFSET(12),
103 REG_OFFSET(und_regs[0]), /* r13 */
104 REG_OFFSET(und_regs[1]), /* r14 */
105 },
106};
107
108/*
109 * Return a pointer to the banked copy of register reg_num that is valid in
110 * the current mode of the virtual CPU.
111 */
112u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
113{
114 u32 *reg_array = (u32 *)&vcpu->arch.regs;
115 u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
116
117 switch (mode) {
118 case USR_MODE...SVC_MODE:
119 mode &= ~MODE32_BIT; /* 0 ... 3 */
120 break;
121
122 case ABT_MODE:
123 mode = VCPU_REG_OFFSET_ABT;
124 break;
125
126 case UND_MODE:
127 mode = VCPU_REG_OFFSET_UND;
128 break;
129
130 case SYSTEM_MODE:
131 mode = VCPU_REG_OFFSET_USR;
132 break;
133
134 default:
135 BUG();
136 }
137
138 return reg_array + vcpu_reg_offsets[mode][reg_num];
139}
140
141/*
142 * Return the SPSR for the current mode of the virtual CPU.
143 */
144u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
145{
146 u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
147 switch (mode) {
148 case SVC_MODE:
149 return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
150 case ABT_MODE:
151 return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
152 case UND_MODE:
153 return &vcpu->arch.regs.KVM_ARM_UND_spsr;
154 case IRQ_MODE:
155 return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
156 case FIQ_MODE:
157 return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
158 default:
159 BUG();
160 }
161}
162
163/**
164 * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
165 * @vcpu: the vcpu pointer
166 * @run: the kvm_run structure pointer
167 *
168 * Simply blocks the VCPU via kvm_vcpu_block(), which halts execution of
169 * world-switches and schedules other host processes until there is an
170 * incoming IRQ or FIQ to the VM.
171 */
172int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
173{
174 trace_kvm_wfi(*vcpu_pc(vcpu));
175 kvm_vcpu_block(vcpu);
176 return 1;
177}
178
179/**
180 * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
181 * @vcpu: The VCPU pointer
182 *
183 * When exceptions occur while instructions are executed in Thumb IF-THEN
184 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
185 * to do this little bit of work manually. The fields map like this:
186 *
187 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
188 */
189static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
190{
191 unsigned long itbits, cond;
192 unsigned long cpsr = *vcpu_cpsr(vcpu);
193 bool is_arm = !(cpsr & PSR_T_BIT);
194
195 BUG_ON(is_arm && (cpsr & PSR_IT_MASK));
196
197 if (!(cpsr & PSR_IT_MASK))
198 return;
199
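	/*
	 * CPSR[15:13] hold IT[7:5] (the base condition); the remaining
	 * IT[4:0] bits are split across CPSR[12:10] and CPSR[26:25].
	 */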
200 cond = (cpsr & 0xe000) >> 13;
201 itbits = (cpsr & 0x1c00) >> (10 - 2);
202 itbits |= (cpsr & (0x3 << 25)) >> 25;
203
204 /* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
205 if ((itbits & 0x7) == 0)
206 itbits = cond = 0;
207 else
208 itbits = (itbits << 1) & 0x1f;
209
210 cpsr &= ~PSR_IT_MASK;
211 cpsr |= cond << 13;
212 cpsr |= (itbits & 0x1c) << (10 - 2);
213 cpsr |= (itbits & 0x3) << 25;
214 *vcpu_cpsr(vcpu) = cpsr;
215}
216
217/**
218 * kvm_skip_instr - skip a trapped instruction and proceed to the next
219 * @vcpu: The vcpu pointer
220 */
221void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
222{
223 bool is_thumb;
224
225 is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
226 if (is_thumb && !is_wide_instr)
227 *vcpu_pc(vcpu) += 2;
228 else
229 *vcpu_pc(vcpu) += 4;
230 kvm_adjust_itstate(vcpu);
231}
232
233
234/******************************************************************************
235 * Inject exceptions into the guest
236 */
237
238static u32 exc_vector_base(struct kvm_vcpu *vcpu)
239{
240 u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
241 u32 vbar = vcpu->arch.cp15[c12_VBAR];
242
243 if (sctlr & SCTLR_V)
244 return 0xffff0000;
245	else /* with Security Extensions, vectors live at VBAR */
246 return vbar;
247}
248
249/**
250 * kvm_inject_undefined - inject an undefined exception into the guest
251 * @vcpu: The VCPU to receive the undefined exception
252 *
253 * It is assumed that this code is called from the VCPU thread and that the
254 * VCPU therefore is not currently executing guest code.
255 *
256 * Modelled after TakeUndefInstrException() pseudocode.
257 */
258void kvm_inject_undefined(struct kvm_vcpu *vcpu)
259{
260 u32 new_lr_value;
261 u32 new_spsr_value;
262 u32 cpsr = *vcpu_cpsr(vcpu);
263 u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
264 bool is_thumb = (cpsr & PSR_T_BIT);
265 u32 vect_offset = 4;
266 u32 return_offset = (is_thumb) ? 2 : 4;
267
268 new_spsr_value = cpsr;
269 new_lr_value = *vcpu_pc(vcpu) - return_offset;
270
271 *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
272 *vcpu_cpsr(vcpu) |= PSR_I_BIT;
273 *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
274
275 if (sctlr & SCTLR_TE)
276 *vcpu_cpsr(vcpu) |= PSR_T_BIT;
277 if (sctlr & SCTLR_EE)
278 *vcpu_cpsr(vcpu) |= PSR_E_BIT;
279
280 /* Note: These now point to UND banked copies */
281 *vcpu_spsr(vcpu) = cpsr;
282 *vcpu_reg(vcpu, 14) = new_lr_value;
283
284 /* Branch to exception vector */
285 *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
286}
287
288/*
289 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
290 * pseudocode.
291 */
292static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
293{
294 u32 new_lr_value;
295 u32 new_spsr_value;
296 u32 cpsr = *vcpu_cpsr(vcpu);
297 u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
298 bool is_thumb = (cpsr & PSR_T_BIT);
299 u32 vect_offset;
300 u32 return_offset = (is_thumb) ? 4 : 0;
301 bool is_lpae;
302
303 new_spsr_value = cpsr;
304 new_lr_value = *vcpu_pc(vcpu) + return_offset;
305
306 *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
307 *vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
308 *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
309
310 if (sctlr & SCTLR_TE)
311 *vcpu_cpsr(vcpu) |= PSR_T_BIT;
312 if (sctlr & SCTLR_EE)
313 *vcpu_cpsr(vcpu) |= PSR_E_BIT;
314
315 /* Note: These now point to ABT banked copies */
316 *vcpu_spsr(vcpu) = cpsr;
317 *vcpu_reg(vcpu, 14) = new_lr_value;
318
319 if (is_pabt)
320 vect_offset = 12;
321 else
322 vect_offset = 16;
323
324 /* Branch to exception vector */
325 *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
326
327 if (is_pabt) {
328		/* Set IFAR and IFSR */
329 vcpu->arch.cp15[c6_IFAR] = addr;
330 is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
331 /* Always give debug fault for now - should give guest a clue */
332 if (is_lpae)
333 vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
334 else
335 vcpu->arch.cp15[c5_IFSR] = 2;
336 } else { /* !iabt */
337 /* Set DFAR and DFSR */
338 vcpu->arch.cp15[c6_DFAR] = addr;
339 is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
340 /* Always give debug fault for now - should give guest a clue */
341 if (is_lpae)
342 vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
343 else
344 vcpu->arch.cp15[c5_DFSR] = 2;
345 }
346
347}
348
349/**
350 * kvm_inject_dabt - inject a data abort into the guest
351 * @vcpu: The VCPU to receive the data abort
352 * @addr: The address to report in the DFAR
353 *
354 * It is assumed that this code is called from the VCPU thread and that the
355 * VCPU therefore is not currently executing guest code.
356 */
357void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
358{
359 inject_abt(vcpu, false, addr);
360}
361
362/**
363 * kvm_inject_pabt - inject a prefetch abort into the guest
364 * @vcpu: The VCPU to receive the prefetch abort
365 * @addr: The address to report in the IFAR
366 *
367 * It is assumed that this code is called from the VCPU thread and that the
368 * VCPU therefore is not currently executing guest code.
369 */
370void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
371{
372 inject_abt(vcpu, true, addr);
373}
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
new file mode 100644
index 000000000000..2339d9609d36
--- /dev/null
+++ b/arch/arm/kvm/guest.c
@@ -0,0 +1,222 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/errno.h>
20#include <linux/err.h>
21#include <linux/kvm_host.h>
22#include <linux/module.h>
23#include <linux/vmalloc.h>
24#include <linux/fs.h>
25#include <asm/uaccess.h>
26#include <asm/kvm.h>
27#include <asm/kvm_asm.h>
28#include <asm/kvm_emulate.h>
29#include <asm/kvm_coproc.h>
30
31#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
32#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
33
34struct kvm_stats_debugfs_item debugfs_entries[] = {
35 { NULL }
36};
37
38int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
39{
40 return 0;
41}
42
43static u64 core_reg_offset_from_id(u64 id)
44{
45 return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
46}
47
48static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
49{
50 u32 __user *uaddr = (u32 __user *)(long)reg->addr;
51 struct kvm_regs *regs = &vcpu->arch.regs;
52 u64 off;
53
54 if (KVM_REG_SIZE(reg->id) != 4)
55 return -ENOENT;
56
57 /* Our ID is an index into the kvm_regs struct. */
58 off = core_reg_offset_from_id(reg->id);
59 if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
60 return -ENOENT;
61
62 return put_user(((u32 *)regs)[off], uaddr);
63}
64
65static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
66{
67 u32 __user *uaddr = (u32 __user *)(long)reg->addr;
68 struct kvm_regs *regs = &vcpu->arch.regs;
69 u64 off, val;
70
71 if (KVM_REG_SIZE(reg->id) != 4)
72 return -ENOENT;
73
74 /* Our ID is an index into the kvm_regs struct. */
75 off = core_reg_offset_from_id(reg->id);
76 if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
77 return -ENOENT;
78
79 if (get_user(val, uaddr) != 0)
80 return -EFAULT;
81
82 if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
83 unsigned long mode = val & MODE_MASK;
84 switch (mode) {
85 case USR_MODE:
86 case FIQ_MODE:
87 case IRQ_MODE:
88 case SVC_MODE:
89 case ABT_MODE:
90 case UND_MODE:
91 break;
92 default:
93 return -EINVAL;
94 }
95 }
96
97 ((u32 *)regs)[off] = val;
98 return 0;
99}
100
101int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
102{
103 return -EINVAL;
104}
105
106int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
107{
108 return -EINVAL;
109}
110
111static unsigned long num_core_regs(void)
112{
113 return sizeof(struct kvm_regs) / sizeof(u32);
114}
115
116/**
117 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
118 *
119 * This is for all registers.
120 */
121unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
122{
123 return num_core_regs() + kvm_arm_num_coproc_regs(vcpu);
124}
125
126/**
127 * kvm_arm_copy_reg_indices - get indices of all registers.
128 *
129 * We do core registers right here, then we append coproc regs.
130 */
131int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
132{
133 unsigned int i;
134 const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
135
136 for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
137 if (put_user(core_reg | i, uindices))
138 return -EFAULT;
139 uindices++;
140 }
141
142 return kvm_arm_copy_coproc_indices(vcpu, uindices);
143}
144
145int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
146{
147 /* We currently use nothing arch-specific in upper 32 bits */
148 if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
149 return -EINVAL;
150
151 /* Register group 16 means we want a core register. */
152 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
153 return get_core_reg(vcpu, reg);
154
155 return kvm_arm_coproc_get_reg(vcpu, reg);
156}
157
158int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
159{
160 /* We currently use nothing arch-specific in upper 32 bits */
161 if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
162 return -EINVAL;
163
164 /* Register group 16 means we set a core register. */
165 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
166 return set_core_reg(vcpu, reg);
167
168 return kvm_arm_coproc_set_reg(vcpu, reg);
169}
170
171int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
172 struct kvm_sregs *sregs)
173{
174 return -EINVAL;
175}
176
177int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
178 struct kvm_sregs *sregs)
179{
180 return -EINVAL;
181}
182
183int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
184 const struct kvm_vcpu_init *init)
185{
186 unsigned int i;
187
188 /* We can only do a Cortex-A15 for now. */
189 if (init->target != kvm_target_cpu())
190 return -EINVAL;
191
192 vcpu->arch.target = init->target;
193 bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
194
195 /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
196 for (i = 0; i < sizeof(init->features) * 8; i++) {
197 if (test_bit(i, (void *)init->features)) {
198 if (i >= KVM_VCPU_MAX_FEATURES)
199 return -ENOENT;
200 set_bit(i, vcpu->arch.features);
201 }
202 }
203
204 /* Now we know what it is, we can reset it. */
205 return kvm_reset_vcpu(vcpu);
206}
207
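/*
 * Illustrative userspace sketch (not part of this patch): the ioctl that
 * reaches kvm_vcpu_set_target() above. "vcpu_fd" is an assumption; the
 * target constant comes from the uapi header added by this series.
 */
struct kvm_vcpu_init init;

memset(&init, 0, sizeof(init));
init.target = KVM_ARM_TARGET_CORTEX_A15; /* the only supported target here */
/* init.features[] left zero: no optional features requested */
if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0)
	perror("KVM_ARM_VCPU_INIT");
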
208int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
209{
210 return -EINVAL;
211}
212
213int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
214{
215 return -EINVAL;
216}
217
218int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
219 struct kvm_translation *tr)
220{
221 return -EINVAL;
222}
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
new file mode 100644
index 000000000000..9f37a79b880b
--- /dev/null
+++ b/arch/arm/kvm/init.S
@@ -0,0 +1,114 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/linkage.h>
20#include <asm/unified.h>
21#include <asm/asm-offsets.h>
22#include <asm/kvm_asm.h>
23#include <asm/kvm_arm.h>
24
25/********************************************************************
26 * Hypervisor initialization
27 * - should be called with:
28 * r0,r1 = Hypervisor pgd pointer
29 * r2 = top of Hyp stack (kernel VA)
30 * r3 = pointer to hyp vectors
31 */
32
33 .text
34 .pushsection .hyp.idmap.text,"ax"
35 .align 5
36__kvm_hyp_init:
37 .globl __kvm_hyp_init
38
39 @ Hyp-mode exception vector
40 W(b) .
41 W(b) .
42 W(b) .
43 W(b) .
44 W(b) .
45 W(b) __do_hyp_init
46 W(b) .
47 W(b) .
48
49__do_hyp_init:
50 @ Set the HTTBR to point to the hypervisor PGD pointer passed
51 mcrr p15, 4, r0, r1, c2
52
53 @ Set the HTCR and VTCR to the same shareability and cacheability
54 @ settings as the non-secure TTBCR and with T0SZ == 0.
55 mrc p15, 4, r0, c2, c0, 2 @ HTCR
56 ldr r12, =HTCR_MASK
57 bic r0, r0, r12
58 mrc p15, 0, r1, c2, c0, 2 @ TTBCR
59 and r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ)
60 orr r0, r0, r1
61 mcr p15, 4, r0, c2, c0, 2 @ HTCR
62
63 mrc p15, 4, r1, c2, c1, 2 @ VTCR
64 ldr r12, =VTCR_MASK
65 bic r1, r1, r12
66 bic r0, r0, #(~VTCR_HTCR_SH) @ clear non-reusable HTCR bits
67 orr r1, r0, r1
68 orr r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S)
69 mcr p15, 4, r1, c2, c1, 2 @ VTCR
70
71 @ Use the same memory attributes for hyp. accesses as the kernel
72 @ (copy MAIRx to HMAIRx).
73 mrc p15, 0, r0, c10, c2, 0
74 mcr p15, 4, r0, c10, c2, 0
75 mrc p15, 0, r0, c10, c2, 1
76 mcr p15, 4, r0, c10, c2, 1
77
78 @ Set the HSCTLR to:
79 @ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
80 @ - Endianness: Kernel config
81 @ - Fast Interrupt Features: Kernel config
82 @ - Write permission implies XN: disabled
83 @ - Instruction cache: enabled
84 @ - Data/Unified cache: enabled
85 @ - Memory alignment checks: enabled
86 @ - MMU: enabled (this code must be run from an identity mapping)
87 mrc p15, 4, r0, c1, c0, 0 @ HSCTLR
88 ldr r12, =HSCTLR_MASK
89 bic r0, r0, r12
90 mrc p15, 0, r1, c1, c0, 0 @ SCTLR
91 ldr r12, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
92 and r1, r1, r12
93 ARM( ldr r12, =(HSCTLR_M | HSCTLR_A) )
94 THUMB( ldr r12, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
95 orr r1, r1, r12
96 orr r0, r0, r1
97 isb
98 mcr p15, 4, r0, c1, c0, 0 @ HSCTLR
99 isb
100
101 @ Set stack pointer and return to the kernel
102 mov sp, r2
103
104 @ Set HVBAR to point to the HYP vectors
105 mcr p15, 4, r3, c12, c0, 0 @ HVBAR
106
107 eret
108
109 .ltorg
110
111 .globl __kvm_hyp_init_end
112__kvm_hyp_init_end:
113
114 .popsection
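
/*
 * Illustrative sketch (not part of init.S): the host-side call that lands
 * in __do_hyp_init above, following the documented register convention
 * (r0/r1 = Hyp pgd, r2 = stack top, r3 = vectors). See cpu_init_hyp_mode()
 * in arch/arm/kvm/arm.c for the real sequence; "stack_page" and the local
 * variable names here are assumptions.
 */
__hyp_set_vectors(virt_to_phys(__kvm_hyp_init)); /* route HVC to the init vector */

phys_addr_t pgd_ptr = kvm_mmu_get_httbr();	/* physical address of the Hyp pgd */
unsigned long pgd_low = pgd_ptr & 0xffffffff;
unsigned long pgd_high = pgd_ptr >> 32;
unsigned long hyp_stack_ptr = stack_page + PAGE_SIZE; /* kernel VA of the stack top */
unsigned long vector_ptr = (unsigned long)__kvm_hyp_vector;

/* HVC #0 with the arguments in r0-r3, handled by __do_hyp_init above */
kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);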
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
new file mode 100644
index 000000000000..c5400d2e97ca
--- /dev/null
+++ b/arch/arm/kvm/interrupts.S
@@ -0,0 +1,478 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/linkage.h>
20#include <linux/const.h>
21#include <asm/unified.h>
22#include <asm/page.h>
23#include <asm/ptrace.h>
24#include <asm/asm-offsets.h>
25#include <asm/kvm_asm.h>
26#include <asm/kvm_arm.h>
27#include <asm/vfpmacros.h>
28#include "interrupts_head.S"
29
30 .text
31
32__kvm_hyp_code_start:
33 .globl __kvm_hyp_code_start
34
35/********************************************************************
36 * Flush per-VMID TLBs
37 *
38 * void __kvm_tlb_flush_vmid(struct kvm *kvm);
39 *
40 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
41 * inside the inner-shareable domain (which is the case for all v7
42 * implementations). If we come across a non-IS SMP implementation, we'll
43 * have to use an IPI based mechanism. Until then, we stick to the simple
44 * hardware assisted version.
45 */
46ENTRY(__kvm_tlb_flush_vmid)
47 push {r2, r3}
48
49 add r0, r0, #KVM_VTTBR
50 ldrd r2, r3, [r0]
51 mcrr p15, 6, r2, r3, c2 @ Write VTTBR
52 isb
53 mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored)
54 dsb
55 isb
56 mov r2, #0
57 mov r3, #0
58 mcrr p15, 6, r2, r3, c2 @ Back to VMID #0
59 isb @ Not necessary if followed by eret
60
61 pop {r2, r3}
62 bx lr
63ENDPROC(__kvm_tlb_flush_vmid)
64
65/********************************************************************
66 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
67 * domain, for all VMIDs
68 *
69 * void __kvm_flush_vm_context(void);
70 */
71ENTRY(__kvm_flush_vm_context)
72 mov r0, #0 @ rn parameter for c15 flushes is SBZ
73
74 /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
75 mcr p15, 4, r0, c8, c3, 4
76 /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
77 mcr p15, 0, r0, c7, c1, 0
78 dsb
79 isb @ Not necessary if followed by eret
80
81 bx lr
82ENDPROC(__kvm_flush_vm_context)
83
84
85/********************************************************************
86 * Hypervisor world-switch code
87 *
88 *
89 * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
90 */
91ENTRY(__kvm_vcpu_run)
92 @ Save the vcpu pointer
93 mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR
94
95 save_host_regs
96
97 @ Store hardware CP15 state and load guest state
98 read_cp15_state store_to_vcpu = 0
99 write_cp15_state read_from_vcpu = 1
100
101 @ If the host kernel has not been configured with VFPv3 support,
102 @ then it is safer if we prevent guests from using it as well.
103#ifdef CONFIG_VFPv3
104 @ Set FPEXC_EN so the guest doesn't trap floating point instructions
105 VFPFMRX r2, FPEXC @ VMRS
106 push {r2}
107 orr r2, r2, #FPEXC_EN
108 VFPFMXR FPEXC, r2 @ VMSR
109#endif
110
111 @ Configure Hyp-role
112 configure_hyp_role vmentry
113
114 @ Trap coprocessor CRx accesses
115 set_hstr vmentry
116 set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
117 set_hdcr vmentry
118
119 @ Write configured ID register into MIDR alias
120 ldr r1, [vcpu, #VCPU_MIDR]
121 mcr p15, 4, r1, c0, c0, 0
122
123 @ Write guest view of MPIDR into VMPIDR
124 ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
125 mcr p15, 4, r1, c0, c0, 5
126
127 @ Set up guest memory translation
128 ldr r1, [vcpu, #VCPU_KVM]
129 add r1, r1, #KVM_VTTBR
130 ldrd r2, r3, [r1]
131 mcrr p15, 6, r2, r3, c2 @ Write VTTBR
132
133 @ We're all done, just restore the GPRs and go to the guest
134 restore_guest_regs
135 clrex @ Clear exclusive monitor
136 eret
137
138__kvm_vcpu_return:
139 /*
140 * return convention:
141 * guest r0, r1, r2 saved on the stack
142 * r0: vcpu pointer
143 * r1: exception code
144 */
145 save_guest_regs
146
147 @ Set VMID == 0
148 mov r2, #0
149 mov r3, #0
150 mcrr p15, 6, r2, r3, c2 @ Write VTTBR
151
152 @ Don't trap coprocessor accesses for host kernel
153 set_hstr vmexit
154 set_hdcr vmexit
155 set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
156
157#ifdef CONFIG_VFPv3
158 @ Save the floating point registers if we let the guest use them.
159 tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
160 bne after_vfp_restore
161
162 @ Switch VFP/NEON hardware state to the host's
163 add r7, vcpu, #VCPU_VFP_GUEST
164 store_vfp_state r7
165 add r7, vcpu, #VCPU_VFP_HOST
166 ldr r7, [r7]
167 restore_vfp_state r7
168
169after_vfp_restore:
170 @ Restore FPEXC_EN which we clobbered on entry
171 pop {r2}
172 VFPFMXR FPEXC, r2
173#endif
174
175 @ Reset Hyp-role
176 configure_hyp_role vmexit
177
178 @ Let host read hardware MIDR
179 mrc p15, 0, r2, c0, c0, 0
180 mcr p15, 4, r2, c0, c0, 0
181
182 @ Back to hardware MPIDR
183 mrc p15, 0, r2, c0, c0, 5
184 mcr p15, 4, r2, c0, c0, 5
185
186 @ Store guest CP15 state and restore host state
187 read_cp15_state store_to_vcpu = 1
188 write_cp15_state read_from_vcpu = 0
189
190 restore_host_regs
191 clrex @ Clear exclusive monitor
192 mov r0, r1 @ Return the return code
193 mov r1, #0 @ Clear upper bits in return value
194 bx lr @ return to IOCTL
195
196/********************************************************************
197 * Call function in Hyp mode
198 *
199 *
200 * u64 kvm_call_hyp(void *hypfn, ...);
201 *
202 * This is not really a variadic function in the classic C way, and care must
203 * be taken when calling this to ensure parameters are passed in registers
204 * only, since the stack will change between the caller and the callee.
205 *
206 * Call the function with the first argument containing a pointer to the
207 * function you wish to call in Hyp mode, and subsequent arguments will be
208 * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
209 * function pointer can be passed). The function being called must be mapped
210 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
211 * passed in r0 and r1.
212 *
213 * The calling convention follows the standard AAPCS:
214 * r0 - r3: caller save
215 * r12: caller save
216 * rest: callee save
217 */
218ENTRY(kvm_call_hyp)
219 hvc #0
220 bx lr
221
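/*
 * For example (sketch, mirroring the callers elsewhere in this series),
 * the world switch above is entered from arch/arm/kvm/arm.c as
 *
 *	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * and mmu.c uses the same trampoline for the per-VMID TLB flush:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
 */
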
222/********************************************************************
223 * Hypervisor exception vector and handlers
224 *
225 *
226 * The KVM/ARM Hypervisor ABI is defined as follows:
227 *
228 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
229 * instruction is issued since all traps are disabled when running the host
230 * kernel as per the Hyp-mode initialization at boot time.
231 *
232 * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc
233 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
234 * host kernel) and they cause a trap to the vector page + offset 0xc when HVC
235 * instructions are called from within Hyp-mode.
236 *
237 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
238 * Switching to Hyp mode is done through a simple HVC #0 instruction. The
239 * exception vector code will check that the HVC comes from VMID==0 and if
240 * so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
241 * - r0 contains a pointer to a HYP function
242 * - r1, r2, and r3 contain arguments to the above function.
243 * - The HYP function will be called with its arguments in r0, r1 and r2.
244 * On HYP function return, we return directly to SVC.
245 *
246 * Note that the above is used to execute code in Hyp-mode from a host-kernel
247 * point of view, and is a different concept from performing a world-switch and
248 * executing guest code SVC mode (with a VMID != 0).
249 */
250
251/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
252.macro bad_exception exception_code, panic_str
253 push {r0-r2}
254 mrrc p15, 6, r0, r1, c2 @ Read VTTBR
255 lsr r1, r1, #16
256 ands r1, r1, #0xff
257 beq 99f
258
259 load_vcpu @ Load VCPU pointer
260 .if \exception_code == ARM_EXCEPTION_DATA_ABORT
261 mrc p15, 4, r2, c5, c2, 0 @ HSR
262 mrc p15, 4, r1, c6, c0, 0 @ HDFAR
263 str r2, [vcpu, #VCPU_HSR]
264 str r1, [vcpu, #VCPU_HxFAR]
265 .endif
266 .if \exception_code == ARM_EXCEPTION_PREF_ABORT
267 mrc p15, 4, r2, c5, c2, 0 @ HSR
268 mrc p15, 4, r1, c6, c0, 2 @ HIFAR
269 str r2, [vcpu, #VCPU_HSR]
270 str r1, [vcpu, #VCPU_HxFAR]
271 .endif
272 mov r1, #\exception_code
273 b __kvm_vcpu_return
274
275 @ We were in the host already. Let's craft a panicking return to SVC.
27699: mrs r2, cpsr
277 bic r2, r2, #MODE_MASK
278 orr r2, r2, #SVC_MODE
279THUMB( orr r2, r2, #PSR_T_BIT )
280 msr spsr_cxsf, r2
281 mrs r1, ELR_hyp
282 ldr r2, =BSYM(panic)
283 msr ELR_hyp, r2
284 ldr r0, =\panic_str
285 eret
286.endm
287
288 .text
289
290 .align 5
291__kvm_hyp_vector:
292 .globl __kvm_hyp_vector
293
294 @ Hyp-mode exception vector
295 W(b) hyp_reset
296 W(b) hyp_undef
297 W(b) hyp_svc
298 W(b) hyp_pabt
299 W(b) hyp_dabt
300 W(b) hyp_hvc
301 W(b) hyp_irq
302 W(b) hyp_fiq
303
304 .align
305hyp_reset:
306 b hyp_reset
307
308 .align
309hyp_undef:
310 bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str
311
312 .align
313hyp_svc:
314 bad_exception ARM_EXCEPTION_HVC, svc_die_str
315
316 .align
317hyp_pabt:
318 bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str
319
320 .align
321hyp_dabt:
322 bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str
323
324 .align
325hyp_hvc:
326 /*
327 * Getting here is either because of a trap from a guest, or from calling
328 * HVC from the host kernel, which means "switch to Hyp mode".
329 */
330 push {r0, r1, r2}
331
332 @ Check syndrome register
333 mrc p15, 4, r1, c5, c2, 0 @ HSR
334 lsr r0, r1, #HSR_EC_SHIFT
335#ifdef CONFIG_VFPv3
336 cmp r0, #HSR_EC_CP_0_13
337 beq switch_to_guest_vfp
338#endif
339 cmp r0, #HSR_EC_HVC
340 bne guest_trap @ Not HVC instr.
341
342 /*
343 * Let's check if the HVC came from VMID 0 and allow simple
344 * switch to Hyp mode
345 */
346 mrrc p15, 6, r0, r2, c2
347 lsr r2, r2, #16
348 and r2, r2, #0xff
349 cmp r2, #0
350 bne guest_trap @ Guest called HVC
351
352host_switch_to_hyp:
353 pop {r0, r1, r2}
354
355 push {lr}
356 mrs lr, SPSR
357 push {lr}
358
359 mov lr, r0
360 mov r0, r1
361 mov r1, r2
362 mov r2, r3
363
364THUMB( orr lr, #1)
365 blx lr @ Call the HYP function
366
367 pop {lr}
368 msr SPSR_csxf, lr
369 pop {lr}
370 eret
371
372guest_trap:
373 load_vcpu @ Load VCPU pointer to r0
374 str r1, [vcpu, #VCPU_HSR]
375
376 @ Check if we need the fault information
377 lsr r1, r1, #HSR_EC_SHIFT
378 cmp r1, #HSR_EC_IABT
379 mrceq p15, 4, r2, c6, c0, 2 @ HIFAR
380 beq 2f
381 cmp r1, #HSR_EC_DABT
382 bne 1f
383 mrc p15, 4, r2, c6, c0, 0 @ HDFAR
384
3852: str r2, [vcpu, #VCPU_HxFAR]
386
387 /*
388 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
389 *
390 * Abort on the stage 2 translation for a memory access from a
391 * Non-secure PL1 or PL0 mode:
392 *
393 * For any Access flag fault or Translation fault, and also for any
394 * Permission fault on the stage 2 translation of a memory access
395 * made as part of a translation table walk for a stage 1 translation,
396 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
397 * is UNKNOWN.
398 */
399
400 /* Check for permission fault, and S1PTW */
401 mrc p15, 4, r1, c5, c2, 0 @ HSR
402 and r0, r1, #HSR_FSC_TYPE
403 cmp r0, #FSC_PERM
404 tsteq r1, #(1 << 7) @ S1PTW
405 mrcne p15, 4, r2, c6, c0, 4 @ HPFAR
406 bne 3f
407
408 /* Resolve IPA using the xFAR */
409 mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR
410 isb
411 mrrc p15, 0, r0, r1, c7 @ PAR
412 tst r0, #1
413 bne 4f @ Failed translation
414 ubfx r2, r0, #12, #20
415 lsl r2, r2, #4
416 orr r2, r2, r1, lsl #24
417
4183: load_vcpu @ Load VCPU pointer to r0
419 str r2, [r0, #VCPU_HPFAR]
420
4211: mov r1, #ARM_EXCEPTION_HVC
422 b __kvm_vcpu_return
423
4244: pop {r0, r1, r2} @ Failed translation, return to guest
425 eret
426
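/*
 * Illustrative C equivalent (not part of this file) of the IPA resolution
 * above: PAR[39:12] returned by the ATS1CPR lookup is repacked into the
 * HPFAR layout, which keeps IPA[39:12] in bits [31:4]:
 *
 *	u32 par_lo, par_hi, hpfar;                 // r0, r1, r2 above
 *	hpfar  = ((par_lo >> 12) & 0xfffff) << 4;  // PA[31:12] -> HPFAR[23:4]
 *	hpfar |= (par_hi & 0xff) << 24;            // PA[39:32] -> HPFAR[31:24]
 */
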
427/*
428 * If VFPv3 support is not available, then we will not switch the VFP
429 * registers; however, cp10 and cp11 accesses will still trap and fall back
430 * to the regular coprocessor emulation code, which currently will
431 * inject an undefined exception to the guest.
432 */
433#ifdef CONFIG_VFPv3
434switch_to_guest_vfp:
435 load_vcpu @ Load VCPU pointer to r0
436 push {r3-r7}
437
438 @ NEON/VFP used. Turn on VFP access.
439 set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
440
441 @ Switch VFP/NEON hardware state to the guest's
442 add r7, r0, #VCPU_VFP_HOST
443 ldr r7, [r7]
444 store_vfp_state r7
445 add r7, r0, #VCPU_VFP_GUEST
446 restore_vfp_state r7
447
448 pop {r3-r7}
449 pop {r0-r2}
450 eret
451#endif
452
453 .align
454hyp_irq:
455 push {r0, r1, r2}
456 mov r1, #ARM_EXCEPTION_IRQ
457 load_vcpu @ Load VCPU pointer to r0
458 b __kvm_vcpu_return
459
460 .align
461hyp_fiq:
462 b hyp_fiq
463
464 .ltorg
465
466__kvm_hyp_code_end:
467 .globl __kvm_hyp_code_end
468
469 .section ".rodata"
470
471und_die_str:
472 .ascii "unexpected undefined exception in Hyp mode at: %#08x"
473pabt_die_str:
474 .ascii "unexpected prefetch abort in Hyp mode at: %#08x"
475dabt_die_str:
476 .ascii "unexpected data abort in Hyp mode at: %#08x"
477svc_die_str:
478 .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x"
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
new file mode 100644
index 000000000000..6a95d341e9c5
--- /dev/null
+++ b/arch/arm/kvm/interrupts_head.S
@@ -0,0 +1,441 @@
1#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4))
2#define VCPU_USR_SP (VCPU_USR_REG(13))
3#define VCPU_USR_LR (VCPU_USR_REG(14))
4#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4))
5
6/*
7 * Many of these macros need to access the VCPU structure, which is always
8 * held in r0. These macros should never clobber r1, as it is used to hold the
9 * exception code on the return path (except of course the macro that switches
10 * all the registers before the final jump to the VM).
11 */
12vcpu .req r0 @ vcpu pointer always in r0
13
14/* Clobbers {r2-r6} */
15.macro store_vfp_state vfp_base
16 @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions
17 VFPFMRX r2, FPEXC
18 @ Make sure VFP is enabled so we can touch the registers.
19 orr r6, r2, #FPEXC_EN
20 VFPFMXR FPEXC, r6
21
22 VFPFMRX r3, FPSCR
23 tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
24 beq 1f
25 @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
26 @ we only need to save them if FPEXC_EX is set.
27 VFPFMRX r4, FPINST
28 tst r2, #FPEXC_FP2V
29 VFPFMRX r5, FPINST2, ne @ vmrsne
30 bic r6, r2, #FPEXC_EX @ FPEXC_EX disable
31 VFPFMXR FPEXC, r6
321:
33 VFPFSTMIA \vfp_base, r6 @ Save VFP registers
34 stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2
35.endm
36
37/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */
38.macro restore_vfp_state vfp_base
39 VFPFLDMIA \vfp_base, r6 @ Load VFP registers
40 ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2
41
42 VFPFMXR FPSCR, r3
43 tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
44 beq 1f
45 VFPFMXR FPINST, r4
46 tst r2, #FPEXC_FP2V
47 VFPFMXR FPINST2, r5, ne
481:
49 VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN)
50.endm
51
52/* These are simply for the macros to work - the values don't have meaning */
53.equ usr, 0
54.equ svc, 1
55.equ abt, 2
56.equ und, 3
57.equ irq, 4
58.equ fiq, 5
59
60.macro push_host_regs_mode mode
61 mrs r2, SP_\mode
62 mrs r3, LR_\mode
63 mrs r4, SPSR_\mode
64 push {r2, r3, r4}
65.endm
66
67/*
68 * Store all host persistent registers on the stack.
69 * Clobbers all registers, in all modes, except r0 and r1.
70 */
71.macro save_host_regs
72 /* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
73 mrs r2, ELR_hyp
74 push {r2}
75
76 /* usr regs */
77 push {r4-r12} @ r0-r3 are always clobbered
78 mrs r2, SP_usr
79 mov r3, lr
80 push {r2, r3}
81
82 push_host_regs_mode svc
83 push_host_regs_mode abt
84 push_host_regs_mode und
85 push_host_regs_mode irq
86
87 /* fiq regs */
88 mrs r2, r8_fiq
89 mrs r3, r9_fiq
90 mrs r4, r10_fiq
91 mrs r5, r11_fiq
92 mrs r6, r12_fiq
93 mrs r7, SP_fiq
94 mrs r8, LR_fiq
95 mrs r9, SPSR_fiq
96 push {r2-r9}
97.endm
98
99.macro pop_host_regs_mode mode
100 pop {r2, r3, r4}
101 msr SP_\mode, r2
102 msr LR_\mode, r3
103 msr SPSR_\mode, r4
104.endm
105
106/*
107 * Restore all host registers from the stack.
108 * Clobbers all registers, in all modes, except r0 and r1.
109 */
110.macro restore_host_regs
111 pop {r2-r9}
112 msr r8_fiq, r2
113 msr r9_fiq, r3
114 msr r10_fiq, r4
115 msr r11_fiq, r5
116 msr r12_fiq, r6
117 msr SP_fiq, r7
118 msr LR_fiq, r8
119 msr SPSR_fiq, r9
120
121 pop_host_regs_mode irq
122 pop_host_regs_mode und
123 pop_host_regs_mode abt
124 pop_host_regs_mode svc
125
126 pop {r2, r3}
127 msr SP_usr, r2
128 mov lr, r3
129 pop {r4-r12}
130
131 pop {r2}
132 msr ELR_hyp, r2
133.endm
134
135/*
136 * Restore SP, LR and SPSR for a given mode. offset is the offset of
137 * this mode's registers from the VCPU base.
138 *
139 * Assumes vcpu pointer in vcpu reg
140 *
141 * Clobbers r1, r2, r3, r4.
142 */
143.macro restore_guest_regs_mode mode, offset
144 add r1, vcpu, \offset
145 ldm r1, {r2, r3, r4}
146 msr SP_\mode, r2
147 msr LR_\mode, r3
148 msr SPSR_\mode, r4
149.endm
150
151/*
152 * Restore all guest registers from the vcpu struct.
153 *
154 * Assumes vcpu pointer in vcpu reg
155 *
156 * Clobbers *all* registers.
157 */
158.macro restore_guest_regs
159 restore_guest_regs_mode svc, #VCPU_SVC_REGS
160 restore_guest_regs_mode abt, #VCPU_ABT_REGS
161 restore_guest_regs_mode und, #VCPU_UND_REGS
162 restore_guest_regs_mode irq, #VCPU_IRQ_REGS
163
164 add r1, vcpu, #VCPU_FIQ_REGS
165 ldm r1, {r2-r9}
166 msr r8_fiq, r2
167 msr r9_fiq, r3
168 msr r10_fiq, r4
169 msr r11_fiq, r5
170 msr r12_fiq, r6
171 msr SP_fiq, r7
172 msr LR_fiq, r8
173 msr SPSR_fiq, r9
174
175 @ Load return state
176 ldr r2, [vcpu, #VCPU_PC]
177 ldr r3, [vcpu, #VCPU_CPSR]
178 msr ELR_hyp, r2
179 msr SPSR_cxsf, r3
180
181 @ Load user registers
182 ldr r2, [vcpu, #VCPU_USR_SP]
183 ldr r3, [vcpu, #VCPU_USR_LR]
184 msr SP_usr, r2
185 mov lr, r3
186 add vcpu, vcpu, #(VCPU_USR_REGS)
187 ldm vcpu, {r0-r12}
188.endm
189
190/*
191 * Save SP, LR and SPSR for a given mode. offset is the offset of
192 * this mode's registers from the VCPU base.
193 *
194 * Assumes vcpu pointer in vcpu reg
195 *
196 * Clobbers r2, r3, r4, r5.
197 */
198.macro save_guest_regs_mode mode, offset
199 add r2, vcpu, \offset
200 mrs r3, SP_\mode
201 mrs r4, LR_\mode
202 mrs r5, SPSR_\mode
203 stm r2, {r3, r4, r5}
204.endm
205
206/*
207 * Save all guest registers to the vcpu struct
208 * Expects guest's r0, r1, r2 on the stack.
209 *
210 * Assumes vcpu pointer in vcpu reg
211 *
212 * Clobbers r2, r3, r4, r5.
213 */
214.macro save_guest_regs
215 @ Store usr registers
216 add r2, vcpu, #VCPU_USR_REG(3)
217 stm r2, {r3-r12}
218 add r2, vcpu, #VCPU_USR_REG(0)
219 pop {r3, r4, r5} @ r0, r1, r2
220 stm r2, {r3, r4, r5}
221 mrs r2, SP_usr
222 mov r3, lr
223 str r2, [vcpu, #VCPU_USR_SP]
224 str r3, [vcpu, #VCPU_USR_LR]
225
226 @ Store return state
227 mrs r2, ELR_hyp
228 mrs r3, spsr
229 str r2, [vcpu, #VCPU_PC]
230 str r3, [vcpu, #VCPU_CPSR]
231
232 @ Store other guest registers
233 save_guest_regs_mode svc, #VCPU_SVC_REGS
234 save_guest_regs_mode abt, #VCPU_ABT_REGS
235 save_guest_regs_mode und, #VCPU_UND_REGS
236 save_guest_regs_mode irq, #VCPU_IRQ_REGS
237.endm
238
239/* Reads cp15 registers from hardware and stores them in memory
240 * @store_to_vcpu: If 0, registers are written in-order to the stack,
241 * otherwise to the VCPU struct pointed to by the vcpu register
242 *
243 * Assumes vcpu pointer in vcpu reg
244 *
245 * Clobbers r2 - r12
246 */
247.macro read_cp15_state store_to_vcpu
248 mrc p15, 0, r2, c1, c0, 0 @ SCTLR
249 mrc p15, 0, r3, c1, c0, 2 @ CPACR
250 mrc p15, 0, r4, c2, c0, 2 @ TTBCR
251 mrc p15, 0, r5, c3, c0, 0 @ DACR
252 mrrc p15, 0, r6, r7, c2 @ TTBR 0
253 mrrc p15, 1, r8, r9, c2 @ TTBR 1
254 mrc p15, 0, r10, c10, c2, 0 @ PRRR
255 mrc p15, 0, r11, c10, c2, 1 @ NMRR
256 mrc p15, 2, r12, c0, c0, 0 @ CSSELR
257
258 .if \store_to_vcpu == 0
259 push {r2-r12} @ Push CP15 registers
260 .else
261 str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
262 str r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
263 str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
264 str r5, [vcpu, #CP15_OFFSET(c3_DACR)]
265 add r2, vcpu, #CP15_OFFSET(c2_TTBR0)
266 strd r6, r7, [r2]
267 add r2, vcpu, #CP15_OFFSET(c2_TTBR1)
268 strd r8, r9, [r2]
269 str r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
270 str r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
271 str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
272 .endif
273
274 mrc p15, 0, r2, c13, c0, 1 @ CID
275 mrc p15, 0, r3, c13, c0, 2 @ TID_URW
276 mrc p15, 0, r4, c13, c0, 3 @ TID_URO
277 mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV
278 mrc p15, 0, r6, c5, c0, 0 @ DFSR
279 mrc p15, 0, r7, c5, c0, 1 @ IFSR
280 mrc p15, 0, r8, c5, c1, 0 @ ADFSR
281 mrc p15, 0, r9, c5, c1, 1 @ AIFSR
282 mrc p15, 0, r10, c6, c0, 0 @ DFAR
283 mrc p15, 0, r11, c6, c0, 2 @ IFAR
284 mrc p15, 0, r12, c12, c0, 0 @ VBAR
285
286 .if \store_to_vcpu == 0
287 push {r2-r12} @ Push CP15 registers
288 .else
289 str r2, [vcpu, #CP15_OFFSET(c13_CID)]
290 str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
291 str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
292 str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
293 str r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
294 str r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
295 str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
296 str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
297 str r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
298 str r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
299 str r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
300 .endif
301.endm
302
303/*
304 * Reads cp15 registers from memory and writes them to hardware
305 * @read_from_vcpu: If 0, registers are read in-order from the stack,
306 * otherwise from the VCPU struct pointed to by the vcpu register
307 *
308 * Assumes vcpu pointer in vcpu reg
309 */
310.macro write_cp15_state read_from_vcpu
311 .if \read_from_vcpu == 0
312 pop {r2-r12}
313 .else
314 ldr r2, [vcpu, #CP15_OFFSET(c13_CID)]
315 ldr r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
316 ldr r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
317 ldr r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
318 ldr r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
319 ldr r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
320 ldr r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
321 ldr r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
322 ldr r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
323 ldr r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
324 ldr r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
325 .endif
326
327 mcr p15, 0, r2, c13, c0, 1 @ CID
328 mcr p15, 0, r3, c13, c0, 2 @ TID_URW
329 mcr p15, 0, r4, c13, c0, 3 @ TID_URO
330 mcr p15, 0, r5, c13, c0, 4 @ TID_PRIV
331 mcr p15, 0, r6, c5, c0, 0 @ DFSR
332 mcr p15, 0, r7, c5, c0, 1 @ IFSR
333 mcr p15, 0, r8, c5, c1, 0 @ ADFSR
334 mcr p15, 0, r9, c5, c1, 1 @ AIFSR
335 mcr p15, 0, r10, c6, c0, 0 @ DFAR
336 mcr p15, 0, r11, c6, c0, 2 @ IFAR
337 mcr p15, 0, r12, c12, c0, 0 @ VBAR
338
339 .if \read_from_vcpu == 0
340 pop {r2-r12}
341 .else
342 ldr r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
343 ldr r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
344 ldr r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
345 ldr r5, [vcpu, #CP15_OFFSET(c3_DACR)]
346 add r12, vcpu, #CP15_OFFSET(c2_TTBR0)
347 ldrd r6, r7, [r12]
348 add r12, vcpu, #CP15_OFFSET(c2_TTBR1)
349 ldrd r8, r9, [r12]
350 ldr r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
351 ldr r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
352 ldr r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
353 .endif
354
355 mcr p15, 0, r2, c1, c0, 0 @ SCTLR
356 mcr p15, 0, r3, c1, c0, 2 @ CPACR
357 mcr p15, 0, r4, c2, c0, 2 @ TTBCR
358 mcr p15, 0, r5, c3, c0, 0 @ DACR
359 mcrr p15, 0, r6, r7, c2 @ TTBR 0
360 mcrr p15, 1, r8, r9, c2 @ TTBR 1
361 mcr p15, 0, r10, c10, c2, 0 @ PRRR
362 mcr p15, 0, r11, c10, c2, 1 @ NMRR
363 mcr p15, 2, r12, c0, c0, 0 @ CSSELR
364.endm
365
366/*
367 * Save the VGIC CPU state into memory
368 *
369 * Assumes vcpu pointer in vcpu reg
370 */
371.macro save_vgic_state
372.endm
373
374/*
375 * Restore the VGIC CPU state from memory
376 *
377 * Assumes vcpu pointer in vcpu reg
378 */
379.macro restore_vgic_state
380.endm
381
382.equ vmentry, 0
383.equ vmexit, 1
384
385/* Configures the HSTR (Hyp System Trap Register) on entry/return
386 * (hardware reset value is 0) */
387.macro set_hstr operation
388 mrc p15, 4, r2, c1, c1, 3
389 ldr r3, =HSTR_T(15)
390 .if \operation == vmentry
391 orr r2, r2, r3 @ Trap CR{15}
392 .else
393 bic r2, r2, r3 @ Don't trap any CRx accesses
394 .endif
395 mcr p15, 4, r2, c1, c1, 3
396.endm
397
398/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
399 * (hardware reset value is 0). Keep previous value in r2. */
400.macro set_hcptr operation, mask
401 mrc p15, 4, r2, c1, c1, 2
402 ldr r3, =\mask
403 .if \operation == vmentry
404 orr r3, r2, r3 @ Trap coproc-accesses defined in mask
405 .else
406 bic r3, r2, r3 @ Don't trap defined coproc-accesses
407 .endif
408 mcr p15, 4, r3, c1, c1, 2
409.endm
410
411/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
412 * (hardware reset value is 0) */
413.macro set_hdcr operation
414 mrc p15, 4, r2, c1, c1, 1
415 ldr r3, =(HDCR_TPM|HDCR_TPMCR)
416 .if \operation == vmentry
417 orr r2, r2, r3 @ Trap some perfmon accesses
418 .else
419 bic r2, r2, r3 @ Don't trap any perfmon accesses
420 .endif
421 mcr p15, 4, r2, c1, c1, 1
422.endm
423
424/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
425.macro configure_hyp_role operation
426 mrc p15, 4, r2, c1, c1, 0 @ HCR
427 bic r2, r2, #HCR_VIRT_EXCP_MASK
428 ldr r3, =HCR_GUEST_MASK
429 .if \operation == vmentry
430 orr r2, r2, r3
431 ldr r3, [vcpu, #VCPU_IRQ_LINES]
432 orr r2, r2, r3
433 .else
434 bic r2, r2, r3
435 .endif
436 mcr p15, 4, r2, c1, c1, 0
437.endm
438
439.macro load_vcpu
440 mrc p15, 4, vcpu, c13, c0, 2 @ HTPIDR
441.endm
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
new file mode 100644
index 000000000000..0144baf82904
--- /dev/null
+++ b/arch/arm/kvm/mmio.c
@@ -0,0 +1,153 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/kvm_host.h>
20#include <asm/kvm_mmio.h>
21#include <asm/kvm_emulate.h>
22#include <trace/events/kvm.h>
23
24#include "trace.h"
25
26/**
27 * kvm_handle_mmio_return - Handle MMIO loads after user space emulation
28 * @vcpu: The VCPU pointer
29 * @run: The VCPU run struct containing the mmio data
30 *
31 * This should only be called after returning from userspace for MMIO load
32 * emulation.
33 */
34int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
35{
36 __u32 *dest;
37 unsigned int len;
38 int mask;
39
40 if (!run->mmio.is_write) {
41 dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
42 memset(dest, 0, sizeof(int));
43
44 len = run->mmio.len;
45 if (len > 4)
46 return -EINVAL;
47
48 memcpy(dest, run->mmio.data, len);
49
50 trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
51 *((u64 *)run->mmio.data));
52
53 if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
54 mask = 1U << ((len * 8) - 1);
55 *dest = (*dest ^ mask) - mask;
56 }
57 }
58
59 return 0;
60}
61
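/*
 * Illustrative sketch (not part of this patch): the XOR/subtract idiom
 * above sign-extends a len-byte value in place. For a 1-byte load of
 * 0x80, for example:
 *
 *	mask = 1U << (1 * 8 - 1);          // 0x80
 *	val  = (0x80 ^ 0x80) - 0x80;       // 0xffffff80 == (u32)-128
 *
 * i.e. the sign bit is replicated into all of the upper bits.
 */
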
62static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
63 struct kvm_exit_mmio *mmio)
64{
65 unsigned long rt, len;
66 bool is_write, sign_extend;
67
68 if ((vcpu->arch.hsr >> 8) & 1) {
69 /* cache operation on I/O addr, tell guest unsupported */
70 kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
71 return 1;
72 }
73
74 if ((vcpu->arch.hsr >> 7) & 1) {
75 /* page table accesses IO mem: tell guest to fix its TTBR */
76 kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
77 return 1;
78 }
79
80 switch ((vcpu->arch.hsr >> 22) & 0x3) {
81 case 0:
82 len = 1;
83 break;
84 case 1:
85 len = 2;
86 break;
87 case 2:
88 len = 4;
89 break;
90 default:
91 kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
92 return -EFAULT;
93 }
94
95 is_write = vcpu->arch.hsr & HSR_WNR;
96 sign_extend = vcpu->arch.hsr & HSR_SSE;
97 rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
98
99 if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
100 /* IO memory trying to read/write pc */
101 kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
102 return 1;
103 }
104
105 mmio->is_write = is_write;
106 mmio->phys_addr = fault_ipa;
107 mmio->len = len;
108 vcpu->arch.mmio_decode.sign_extend = sign_extend;
109 vcpu->arch.mmio_decode.rt = rt;
110
111 /*
112 * The MMIO instruction is emulated and should not be re-executed
113 * in the guest.
114 */
115 kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
116 return 0;
117}
118
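/*
 * Illustrative sketch (not part of this patch): decoding a concrete
 * ISS-valid HSR value with the field positions used above. The value is
 * made up for the example (EC = 0x24 data abort, ISV = 1):
 *
 *	u32 hsr = 0x90000046;
 *	len         = 1 << ((hsr >> 22) & 0x3);               // SAS 0b00 -> 1 byte
 *	is_write    = hsr & HSR_WNR;                          // bit 6  -> true
 *	sign_extend = hsr & HSR_SSE;                          // bit 21 -> false
 *	rt          = (hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;  // bits 19:16 -> r0
 */
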
119int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
120 phys_addr_t fault_ipa)
121{
122 struct kvm_exit_mmio mmio;
123 unsigned long rt;
124 int ret;
125
126 /*
127 * Prepare MMIO operation. First stash it in a private
128 * structure that we can use for in-kernel emulation. If the
129 * kernel can't handle it, copy it into run->mmio and let user
130 * space do its magic.
131 */
132
133 if (vcpu->arch.hsr & HSR_ISV) {
134 ret = decode_hsr(vcpu, fault_ipa, &mmio);
135 if (ret)
136 return ret;
137 } else {
138 kvm_err("load/store instruction decoding not implemented\n");
139 return -ENOSYS;
140 }
141
142 rt = vcpu->arch.mmio_decode.rt;
143 trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
144 KVM_TRACE_MMIO_READ_UNSATISFIED,
145 mmio.len, fault_ipa,
146 (mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);
147
148 if (mmio.is_write)
149 memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);
150
151 kvm_prepare_mmio(run, &mmio);
152 return 0;
153}
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
new file mode 100644
index 000000000000..f30e13163a96
--- /dev/null
+++ b/arch/arm/kvm/mmu.c
@@ -0,0 +1,787 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/mman.h>
20#include <linux/kvm_host.h>
21#include <linux/io.h>
22#include <trace/events/kvm.h>
23#include <asm/idmap.h>
24#include <asm/pgalloc.h>
25#include <asm/cacheflush.h>
26#include <asm/kvm_arm.h>
27#include <asm/kvm_mmu.h>
28#include <asm/kvm_mmio.h>
29#include <asm/kvm_asm.h>
30#include <asm/kvm_emulate.h>
31#include <asm/mach/map.h>
32#include <trace/events/kvm.h>
33
34#include "trace.h"
35
36extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
37
38static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
39
40static void kvm_tlb_flush_vmid(struct kvm *kvm)
41{
42 kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
43}
44
45static void kvm_set_pte(pte_t *pte, pte_t new_pte)
46{
47 pte_val(*pte) = new_pte;
48 /*
49 * flush_pmd_entry just takes a void pointer and cleans the necessary
50 * cache entries, so we can reuse the function for ptes.
51 */
52 flush_pmd_entry(pte);
53}
54
55static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
56 int min, int max)
57{
58 void *page;
59
60 BUG_ON(max > KVM_NR_MEM_OBJS);
61 if (cache->nobjs >= min)
62 return 0;
63 while (cache->nobjs < max) {
64 page = (void *)__get_free_page(PGALLOC_GFP);
65 if (!page)
66 return -ENOMEM;
67 cache->objects[cache->nobjs++] = page;
68 }
69 return 0;
70}
71
72static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
73{
74 while (mc->nobjs)
75 free_page((unsigned long)mc->objects[--mc->nobjs]);
76}
77
78static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
79{
80 void *p;
81
82 BUG_ON(!mc || !mc->nobjs);
83 p = mc->objects[--mc->nobjs];
84 return p;
85}
86
87static void free_ptes(pmd_t *pmd, unsigned long addr)
88{
89 pte_t *pte;
90 unsigned int i;
91
92 for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
93 if (!pmd_none(*pmd) && pmd_table(*pmd)) {
94 pte = pte_offset_kernel(pmd, addr);
95 pte_free_kernel(NULL, pte);
96 }
97 pmd++;
98 }
99}
100
101/**
102 * free_hyp_pmds - free the Hyp-mode level-2 tables and child level-3 tables
103 *
104 * Assumes this is a page table used strictly in Hyp-mode and therefore contains
105 * only mappings in the kernel memory area, which is above PAGE_OFFSET.
106 */
107void free_hyp_pmds(void)
108{
109 pgd_t *pgd;
110 pud_t *pud;
111 pmd_t *pmd;
112 unsigned long addr;
113
114 mutex_lock(&kvm_hyp_pgd_mutex);
115 for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
116 pgd = hyp_pgd + pgd_index(addr);
117 pud = pud_offset(pgd, addr);
118
119 if (pud_none(*pud))
120 continue;
121 BUG_ON(pud_bad(*pud));
122
123 pmd = pmd_offset(pud, addr);
124 free_ptes(pmd, addr);
125 pmd_free(NULL, pmd);
126 pud_clear(pud);
127 }
128 mutex_unlock(&kvm_hyp_pgd_mutex);
129}
130
131static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
132 unsigned long end)
133{
134 pte_t *pte;
135 unsigned long addr;
136 struct page *page;
137
138 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
139 pte = pte_offset_kernel(pmd, addr);
140 BUG_ON(!virt_addr_valid(addr));
141 page = virt_to_page(addr);
142 kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
143 }
144}
145
146static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
147 unsigned long end,
148 unsigned long *pfn_base)
149{
150 pte_t *pte;
151 unsigned long addr;
152
153 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
154 pte = pte_offset_kernel(pmd, addr);
155 BUG_ON(pfn_valid(*pfn_base));
156 kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
157 (*pfn_base)++;
158 }
159}
160
161static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
162 unsigned long end, unsigned long *pfn_base)
163{
164 pmd_t *pmd;
165 pte_t *pte;
166 unsigned long addr, next;
167
168 for (addr = start; addr < end; addr = next) {
169 pmd = pmd_offset(pud, addr);
170
171 BUG_ON(pmd_sect(*pmd));
172
173 if (pmd_none(*pmd)) {
174 pte = pte_alloc_one_kernel(NULL, addr);
175 if (!pte) {
176 kvm_err("Cannot allocate Hyp pte\n");
177 return -ENOMEM;
178 }
179 pmd_populate_kernel(NULL, pmd, pte);
180 }
181
182 next = pmd_addr_end(addr, end);
183
184 /*
185 * If pfn_base is NULL, we map kernel pages into HYP with the
186 * virtual address. Otherwise, this is considered an I/O
187 * mapping and we map the physical region starting at
188 * *pfn_base to [start, end).
189 */
190 if (!pfn_base)
191 create_hyp_pte_mappings(pmd, addr, next);
192 else
193 create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
194 }
195
196 return 0;
197}
198
199static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
200{
201 unsigned long start = (unsigned long)from;
202 unsigned long end = (unsigned long)to;
203 pgd_t *pgd;
204 pud_t *pud;
205 pmd_t *pmd;
206 unsigned long addr, next;
207 int err = 0;
208
209 BUG_ON(start > end);
210 if (start < PAGE_OFFSET)
211 return -EINVAL;
212
213 mutex_lock(&kvm_hyp_pgd_mutex);
214 for (addr = start; addr < end; addr = next) {
215 pgd = hyp_pgd + pgd_index(addr);
216 pud = pud_offset(pgd, addr);
217
218 if (pud_none_or_clear_bad(pud)) {
219 pmd = pmd_alloc_one(NULL, addr);
220 if (!pmd) {
221 kvm_err("Cannot allocate Hyp pmd\n");
222 err = -ENOMEM;
223 goto out;
224 }
225 pud_populate(NULL, pud, pmd);
226 }
227
228 next = pgd_addr_end(addr, end);
229 err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
230 if (err)
231 goto out;
232 }
233out:
234 mutex_unlock(&kvm_hyp_pgd_mutex);
235 return err;
236}
237
238/**
239 * create_hyp_mappings - map a kernel virtual address range in Hyp mode
240 * @from: The virtual kernel start address of the range
241 * @to: The virtual kernel end address of the range (exclusive)
242 *
243 * The same virtual address as the kernel virtual address is also used in
244 * Hyp-mode mapping to the same underlying physical pages.
245 *
246 * Note: Wrapping around zero in the "to" address is not supported.
247 */
248int create_hyp_mappings(void *from, void *to)
249{
250 return __create_hyp_mappings(from, to, NULL);
251}
252
253/**
254 * create_hyp_io_mappings - map a physical IO range in Hyp mode
255 * @from: The virtual HYP start address of the range
256 * @to: The virtual HYP end address of the range (exclusive)
257 * @addr: The physical start address which gets mapped
258 */
259int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
260{
261 unsigned long pfn = __phys_to_pfn(addr);
262 return __create_hyp_mappings(from, to, &pfn);
263}
264
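/*
 * Illustrative sketch (not part of this file): init_hyp_mode() in
 * arch/arm/kvm/arm.c uses these helpers roughly as follows to make the
 * Hyp code and each per-CPU stack visible at the same kernel VAs in Hyp
 * mode ("stack_page" is an assumption for the example):
 *
 *	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
 *	err = create_hyp_mappings((void *)stack_page,
 *				  (void *)(stack_page + PAGE_SIZE));
 */
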
265/**
266 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
267 * @kvm: The KVM struct pointer for the VM.
268 *
269 * Allocates only the 1st level table, whose size is defined by S2_PGD_ORDER
270 * (it can support either full 40-bit input addresses or be limited to 32-bit
271 * input addresses). Clears the allocated pages.
272 *
273 * Note we don't need locking here as this is only called when the VM is
274 * created, which can only be done once.
275 */
276int kvm_alloc_stage2_pgd(struct kvm *kvm)
277{
278 pgd_t *pgd;
279
280 if (kvm->arch.pgd != NULL) {
281 kvm_err("kvm_arch already initialized?\n");
282 return -EINVAL;
283 }
284
285 pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
286 if (!pgd)
287 return -ENOMEM;
288
289 /* stage-2 pgd must be aligned to its size */
290 VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
291
292 memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
293 clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
294 kvm->arch.pgd = pgd;
295
296 return 0;
297}
298
299static void clear_pud_entry(pud_t *pud)
300{
301 pmd_t *pmd_table = pmd_offset(pud, 0);
302 pud_clear(pud);
303 pmd_free(NULL, pmd_table);
304 put_page(virt_to_page(pud));
305}
306
307static void clear_pmd_entry(pmd_t *pmd)
308{
309 pte_t *pte_table = pte_offset_kernel(pmd, 0);
310 pmd_clear(pmd);
311 pte_free_kernel(NULL, pte_table);
312 put_page(virt_to_page(pmd));
313}
314
315static bool pmd_empty(pmd_t *pmd)
316{
317 struct page *pmd_page = virt_to_page(pmd);
318 return page_count(pmd_page) == 1;
319}
320
321static void clear_pte_entry(pte_t *pte)
322{
323 if (pte_present(*pte)) {
324 kvm_set_pte(pte, __pte(0));
325 put_page(virt_to_page(pte));
326 }
327}
328
329static bool pte_empty(pte_t *pte)
330{
331 struct page *pte_page = virt_to_page(pte);
332 return page_count(pte_page) == 1;
333}
334
335/**
336 * unmap_stage2_range - Clear stage2 page table entries to unmap a range
337 * @kvm: The VM pointer
338 * @start: The intermediate physical base address of the range to unmap
339 * @size: The size of the area to unmap
340 *
341 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
342 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
343 * destroying the VM), otherwise another faulting VCPU may come in and mess
344 * with things behind our backs.
345 */
346static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
347{
348 pgd_t *pgd;
349 pud_t *pud;
350 pmd_t *pmd;
351 pte_t *pte;
352 phys_addr_t addr = start, end = start + size;
353 u64 range;
354
355 while (addr < end) {
356 pgd = kvm->arch.pgd + pgd_index(addr);
357 pud = pud_offset(pgd, addr);
358 if (pud_none(*pud)) {
359 addr += PUD_SIZE;
360 continue;
361 }
362
363 pmd = pmd_offset(pud, addr);
364 if (pmd_none(*pmd)) {
365 addr += PMD_SIZE;
366 continue;
367 }
368
369 pte = pte_offset_kernel(pmd, addr);
370 clear_pte_entry(pte);
371 range = PAGE_SIZE;
372
373 /* If we emptied the pte, walk back up the ladder */
374 if (pte_empty(pte)) {
375 clear_pmd_entry(pmd);
376 range = PMD_SIZE;
377 if (pmd_empty(pmd)) {
378 clear_pud_entry(pud);
379 range = PUD_SIZE;
380 }
381 }
382
383 addr += range;
384 }
385}
386
387/**
388 * kvm_free_stage2_pgd - free all stage-2 tables
389 * @kvm: The KVM struct pointer for the VM.
390 *
391 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
392 * underlying level-2 and level-3 tables before freeing the actual level-1 table
393 * and setting the struct pointer to NULL.
394 *
395 * Note we don't need locking here as this is only called when the VM is
396 * destroyed, which can only be done once.
397 */
398void kvm_free_stage2_pgd(struct kvm *kvm)
399{
400 if (kvm->arch.pgd == NULL)
401 return;
402
403 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
404 free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
405 kvm->arch.pgd = NULL;
406}
407
408
409static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
410 phys_addr_t addr, const pte_t *new_pte, bool iomap)
411{
412 pgd_t *pgd;
413 pud_t *pud;
414 pmd_t *pmd;
415 pte_t *pte, old_pte;
416
417 /* Create 2nd stage page table mapping - Level 1 */
418 pgd = kvm->arch.pgd + pgd_index(addr);
419 pud = pud_offset(pgd, addr);
420 if (pud_none(*pud)) {
421 if (!cache)
422 return 0; /* ignore calls from kvm_set_spte_hva */
423 pmd = mmu_memory_cache_alloc(cache);
424 pud_populate(NULL, pud, pmd);
425 pmd += pmd_index(addr);
426 get_page(virt_to_page(pud));
427 } else
428 pmd = pmd_offset(pud, addr);
429
430 /* Create 2nd stage page table mapping - Level 2 */
431 if (pmd_none(*pmd)) {
432 if (!cache)
433 return 0; /* ignore calls from kvm_set_spte_hva */
434 pte = mmu_memory_cache_alloc(cache);
435 clean_pte_table(pte);
436 pmd_populate_kernel(NULL, pmd, pte);
437 pte += pte_index(addr);
438 get_page(virt_to_page(pmd));
439 } else
440 pte = pte_offset_kernel(pmd, addr);
441
442 if (iomap && pte_present(*pte))
443 return -EFAULT;
444
445 /* Create 2nd stage page table mapping - Level 3 */
446 old_pte = *pte;
447 kvm_set_pte(pte, *new_pte);
448 if (pte_present(old_pte))
449 kvm_tlb_flush_vmid(kvm);
450 else
451 get_page(virt_to_page(pte));
452
453 return 0;
454}
455
456/**
457 * kvm_phys_addr_ioremap - map a device range to guest IPA
458 *
459 * @kvm: The KVM pointer
460 * @guest_ipa: The IPA at which to insert the mapping
461 * @pa: The physical address of the device
462 * @size: The size of the mapping
463 */
464int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
465 phys_addr_t pa, unsigned long size)
466{
467 phys_addr_t addr, end;
468 int ret = 0;
469 unsigned long pfn;
470 struct kvm_mmu_memory_cache cache = { 0, };
471
472 end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
473 pfn = __phys_to_pfn(pa);
474
475 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
476 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR);
477
478 ret = mmu_topup_memory_cache(&cache, 2, 2);
479 if (ret)
480 goto out;
481 spin_lock(&kvm->mmu_lock);
482 ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
483 spin_unlock(&kvm->mmu_lock);
484 if (ret)
485 goto out;
486
487 pfn++;
488 }
489
490out:
491 mmu_free_memory_cache(&cache);
492 return ret;
493}
494
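/*
 * Illustrative sketch (not part of this file): mapping a device window
 * into a guest, e.g. a hypothetical 8K region at IPA 0x10000000 backed by
 * a physical device at 0x1c000000 (both addresses made up):
 *
 *	ret = kvm_phys_addr_ioremap(kvm, 0x10000000, 0x1c000000, SZ_8K);
 */
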
495static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
496{
497 /*
498 * If we are going to insert an instruction page and the icache is
499 * either VIPT or PIPT, there is a potential problem where the host
500 * (or another VM) may have used the same page as this guest, and we
501 * read incorrect data from the icache. If we're using a PIPT cache,
502 * we can invalidate just that page, but if we are using a VIPT cache
503 * we need to invalidate the entire icache - damn shame - as written
504 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
505 *
506 * VIVT caches are tagged using both the ASID and the VMID and don't
507 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
508 */
509 if (icache_is_pipt()) {
510 unsigned long hva = gfn_to_hva(kvm, gfn);
511 __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
512 } else if (!icache_is_vivt_asid_tagged()) {
513 /* any kind of VIPT cache */
514 __flush_icache_all();
515 }
516}
517
518static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
519 gfn_t gfn, struct kvm_memory_slot *memslot,
520 unsigned long fault_status)
521{
522 pte_t new_pte;
523 pfn_t pfn;
524 int ret;
525 bool write_fault, writable;
526 unsigned long mmu_seq;
527 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
528
529 write_fault = kvm_is_write_fault(vcpu->arch.hsr);
530 if (fault_status == FSC_PERM && !write_fault) {
531 kvm_err("Unexpected L2 read permission error\n");
532 return -EFAULT;
533 }
534
535 /* We need minimum second+third level pages */
536 ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
537 if (ret)
538 return ret;
539
540 mmu_seq = vcpu->kvm->mmu_notifier_seq;
541 /*
542 * Ensure the read of mmu_notifier_seq happens before we call
543 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
544 * the page we just got a reference to getting unmapped before we have a
545 * chance to grab the mmu_lock, which ensures that if the page gets
546 * unmapped afterwards, the call to kvm_unmap_hva will take it away
547 * from us again properly. This smp_rmb() interacts with the smp_wmb()
548 * in kvm_mmu_notifier_invalidate_<page|range_end>.
549 */
550 smp_rmb();
551
552 pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
553 if (is_error_pfn(pfn))
554 return -EFAULT;
555
556 new_pte = pfn_pte(pfn, PAGE_S2);
557 coherent_icache_guest_page(vcpu->kvm, gfn);
558
559 spin_lock(&vcpu->kvm->mmu_lock);
560 if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
561 goto out_unlock;
562 if (writable) {
563 pte_val(new_pte) |= L_PTE_S2_RDWR;
564 kvm_set_pfn_dirty(pfn);
565 }
566 stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
567
568out_unlock:
569 spin_unlock(&vcpu->kvm->mmu_lock);
570 kvm_release_pfn_clean(pfn);
571 return 0;
572}
573
574/**
575 * kvm_handle_guest_abort - handles all 2nd stage aborts
576 * @vcpu: the VCPU pointer
577 * @run: the kvm_run structure
578 *
579 * Any abort that gets to the host is almost guaranteed to be caused by a
580 * missing second-stage translation table entry: either the guest simply
581 * needs more memory and we must allocate an appropriate page, or the guest
582 * tried to access I/O memory, which is emulated by user space. The
583 * distinction is based on the IPA causing the fault and whether this
584 * memory region has been registered as standard RAM by user space.
585 */
586int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
587{
588 unsigned long hsr_ec;
589 unsigned long fault_status;
590 phys_addr_t fault_ipa;
591 struct kvm_memory_slot *memslot;
592 bool is_iabt;
593 gfn_t gfn;
594 int ret, idx;
595
596 hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT;
597 is_iabt = (hsr_ec == HSR_EC_IABT);
598 fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8;
599
600 trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr,
601 vcpu->arch.hxfar, fault_ipa);
602
603 /* Check the stage-2 fault is trans. fault or write fault */
604 fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE);
605 if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
606 kvm_err("Unsupported fault status: EC=%#lx DFSC=%#lx\n",
607 hsr_ec, fault_status);
608 return -EFAULT;
609 }
610
611 idx = srcu_read_lock(&vcpu->kvm->srcu);
612
613 gfn = fault_ipa >> PAGE_SHIFT;
614 if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
615 if (is_iabt) {
616 /* Prefetch Abort on I/O address */
617 kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
618 ret = 1;
619 goto out_unlock;
620 }
621
622 if (fault_status != FSC_FAULT) {
623 kvm_err("Unsupported fault status on io memory: %#lx\n",
624 fault_status);
625 ret = -EFAULT;
626 goto out_unlock;
627 }
628
629 /* Adjust page offset */
630 fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK;
631 ret = io_mem_abort(vcpu, run, fault_ipa);
632 goto out_unlock;
633 }
634
635 memslot = gfn_to_memslot(vcpu->kvm, gfn);
636 if (!memslot->user_alloc) {
637 kvm_err("non user-alloc memslots not supported\n");
638 ret = -EINVAL;
639 goto out_unlock;
640 }
641
642 ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
643 if (ret == 0)
644 ret = 1;
645out_unlock:
646 srcu_read_unlock(&vcpu->kvm->srcu, idx);
647 return ret;
648}
649
650static void handle_hva_to_gpa(struct kvm *kvm,
651 unsigned long start,
652 unsigned long end,
653 void (*handler)(struct kvm *kvm,
654 gpa_t gpa, void *data),
655 void *data)
656{
657 struct kvm_memslots *slots;
658 struct kvm_memory_slot *memslot;
659
660 slots = kvm_memslots(kvm);
661
662 /* we only care about the pages that the guest sees */
663 kvm_for_each_memslot(memslot, slots) {
664 unsigned long hva_start, hva_end;
665 gfn_t gfn, gfn_end;
666
667 hva_start = max(start, memslot->userspace_addr);
668 hva_end = min(end, memslot->userspace_addr +
669 (memslot->npages << PAGE_SHIFT));
670 if (hva_start >= hva_end)
671 continue;
672
673 /*
674 * {gfn(page) | page intersects with [hva_start, hva_end)} =
675 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
676 */
677 gfn = hva_to_gfn_memslot(hva_start, memslot);
678 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
679
680 for (; gfn < gfn_end; ++gfn) {
681 gpa_t gpa = gfn << PAGE_SHIFT;
682 handler(kvm, gpa, data);
683 }
684 }
685}
686
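/*
 * Illustrative sketch (not part of this file) of the gfn arithmetic above,
 * with hva_to_gfn_memslot() expanded. For a slot with base_gfn = 0x80000
 * and userspace_addr = 0x40000000, an hva of 0x40003000 yields:
 *
 *	gfn = slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT)
 *	    = 0x80000 + (0x3000 >> 12) = 0x80003
 *	gpa = gfn << PAGE_SHIFT = 0x80003000
 */
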
687static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
688{
689 unmap_stage2_range(kvm, gpa, PAGE_SIZE);
690 kvm_tlb_flush_vmid(kvm);
691}
692
693int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
694{
695 unsigned long end = hva + PAGE_SIZE;
696
697 if (!kvm->arch.pgd)
698 return 0;
699
700 trace_kvm_unmap_hva(hva);
701 handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
702 return 0;
703}
704
705int kvm_unmap_hva_range(struct kvm *kvm,
706 unsigned long start, unsigned long end)
707{
708 if (!kvm->arch.pgd)
709 return 0;
710
711 trace_kvm_unmap_hva_range(start, end);
712 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
713 return 0;
714}
715
716static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
717{
718 pte_t *pte = (pte_t *)data;
719
720 stage2_set_pte(kvm, NULL, gpa, pte, false);
721}
722
723
724void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
725{
726 unsigned long end = hva + PAGE_SIZE;
727 pte_t stage2_pte;
728
729 if (!kvm->arch.pgd)
730 return;
731
732 trace_kvm_set_spte_hva(hva);
733 stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
734 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
735}
736
737void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
738{
739 mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
740}
741
742phys_addr_t kvm_mmu_get_httbr(void)
743{
744 VM_BUG_ON(!virt_addr_valid(hyp_pgd));
745 return virt_to_phys(hyp_pgd);
746}
747
748int kvm_mmu_init(void)
749{
750 if (!hyp_pgd) {
751 kvm_err("Hyp mode PGD not allocated\n");
752 return -ENOMEM;
753 }
754
755 return 0;
756}
757
758/**
759 * kvm_clear_hyp_idmap - remove all idmaps from the hyp pgd
760 *
761 * Free the underlying pmds for all pgds in range and clear the pgds (but
762 * don't free them) afterwards.
763 */
764void kvm_clear_hyp_idmap(void)
765{
766 unsigned long addr, end;
767 unsigned long next;
768 pgd_t *pgd = hyp_pgd;
769 pud_t *pud;
770 pmd_t *pmd;
771
772 addr = virt_to_phys(__hyp_idmap_text_start);
773 end = virt_to_phys(__hyp_idmap_text_end);
774
775 pgd += pgd_index(addr);
776 do {
777 next = pgd_addr_end(addr, end);
778 if (pgd_none_or_clear_bad(pgd))
779 continue;
780 pud = pud_offset(pgd, addr);
781 pmd = pmd_offset(pud, addr);
782
783 pud_clear(pud);
784 clean_pmd_entry(pmd);
785 pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
786 } while (pgd++, addr = next, addr < end);
787}
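
The hva walk above turns a host-address window into a run of guest frame numbers before invoking the handler on each page. Below is a minimal user-space model of that arithmetic, with made-up slot values; the names mirror the kernel helpers but this is illustrative only, not kernel API.

/*
 * Standalone model of the hva -> gfn window computation in
 * handle_hva_to_gpa(). Slot layout and addresses are invented.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct memslot {
	unsigned long userspace_addr;	/* hva where the slot starts */
	unsigned long base_gfn;		/* gfn where the slot starts */
	unsigned long npages;
};

/* same arithmetic as hva_to_gfn_memslot(): page offset into the slot */
static unsigned long hva_to_gfn(unsigned long hva, struct memslot *s)
{
	return s->base_gfn + ((hva - s->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
	struct memslot slot = { 0x40000000UL, 0x10000UL, 256 };
	unsigned long start = 0x40003000UL, end = 0x40006000UL;

	unsigned long hva_start = start > slot.userspace_addr ?
				  start : slot.userspace_addr;
	unsigned long slot_end = slot.userspace_addr +
				 (slot.npages << PAGE_SHIFT);
	unsigned long hva_end = end < slot_end ? end : slot_end;

	/* round up so a partially covered last page is still included */
	unsigned long gfn = hva_to_gfn(hva_start, &slot);
	unsigned long gfn_end = hva_to_gfn(hva_end + PAGE_SIZE - 1, &slot);

	for (; gfn < gfn_end; ++gfn)
		printf("handler(kvm, gpa=%#lx)\n", gfn << PAGE_SHIFT);
	return 0;
}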
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
new file mode 100644
index 000000000000..7ee5bb7a3667
--- /dev/null
+++ b/arch/arm/kvm/psci.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright (C) 2012 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/kvm_host.h>
19#include <linux/wait.h>
20
21#include <asm/kvm_emulate.h>
22#include <asm/kvm_psci.h>
23
24/*
25 * This is an implementation of the Power State Coordination Interface
26 * as described in ARM document number ARM DEN 0022A.
27 */
28
29static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
30{
31 vcpu->arch.pause = true;
32}
33
34static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
35{
36 struct kvm *kvm = source_vcpu->kvm;
37 struct kvm_vcpu *vcpu;
38 wait_queue_head_t *wq;
39 unsigned long cpu_id;
40 phys_addr_t target_pc;
41
42 cpu_id = *vcpu_reg(source_vcpu, 1);
43 if (vcpu_mode_is_32bit(source_vcpu))
44 cpu_id &= ~((u32) 0);
45
46 if (cpu_id >= atomic_read(&kvm->online_vcpus))
47 return KVM_PSCI_RET_INVAL;
48
49 target_pc = *vcpu_reg(source_vcpu, 2);
50
51 vcpu = kvm_get_vcpu(kvm, cpu_id);
52
53 wq = kvm_arch_vcpu_wq(vcpu);
54 if (!waitqueue_active(wq))
55 return KVM_PSCI_RET_INVAL;
56
57 kvm_reset_vcpu(vcpu);
58
59 /* Gracefully handle Thumb2 entry point */
60 if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
61 target_pc &= ~((phys_addr_t) 1);
62 vcpu_set_thumb(vcpu);
63 }
64
65 *vcpu_pc(vcpu) = target_pc;
66 vcpu->arch.pause = false;
67 smp_mb(); /* Make sure the above is visible */
68
69 wake_up_interruptible(wq);
70
71 return KVM_PSCI_RET_SUCCESS;
72}
73
74/**
75 * kvm_psci_call - handle PSCI call if r0 value is in range
76 * @vcpu: Pointer to the VCPU struct
77 *
 78 * Handle PSCI calls from guests through traps from HVC or SMC instructions.
 79 * The calling convention is similar to SMC calls to the secure world, where
 80 * the function number is placed in r0. This function returns true if the
 81 * function number specified in r0 is within the PSCI range, and false
 82 * otherwise.
83 */
84bool kvm_psci_call(struct kvm_vcpu *vcpu)
85{
86 unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
87 unsigned long val;
88
89 switch (psci_fn) {
90 case KVM_PSCI_FN_CPU_OFF:
91 kvm_psci_vcpu_off(vcpu);
92 val = KVM_PSCI_RET_SUCCESS;
93 break;
94 case KVM_PSCI_FN_CPU_ON:
95 val = kvm_psci_vcpu_on(vcpu);
96 break;
97 case KVM_PSCI_FN_CPU_SUSPEND:
98 case KVM_PSCI_FN_MIGRATE:
99 val = KVM_PSCI_RET_NI;
100 break;
101
102 default:
103 return false;
104 }
105
106 *vcpu_reg(vcpu, 0) = val;
107 return true;
108}
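
For reference, the guest side of this interface is a plain hypercall: the function ID goes in r0, arguments in r1 and r2, and the status comes back in r0. A hedged sketch of a guest-side CPU_ON wrapper follows; it assumes a 32-bit ARM guest and that the KVM_PSCI_FN_CPU_ON constant from the KVM uapi headers is visible.

/*
 * Guest-side sketch only: builds for 32-bit ARM with virtualization
 * extensions, and assumes KVM_PSCI_FN_CPU_ON is in scope.
 */
static inline unsigned long psci_cpu_on(unsigned long cpu_id,
					unsigned long entry_point)
{
	register unsigned long r0 asm("r0") = KVM_PSCI_FN_CPU_ON;
	register unsigned long r1 asm("r1") = cpu_id;
	register unsigned long r2 asm("r2") = entry_point;

	/* traps to hyp mode; KVM routes it into kvm_psci_call() above */
	asm volatile("hvc #0"
		     : "+r" (r0)
		     : "r" (r1), "r" (r2)
		     : "memory");

	return r0;	/* KVM_PSCI_RET_SUCCESS or KVM_PSCI_RET_INVAL */
}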
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
new file mode 100644
index 000000000000..b80256b554cd
--- /dev/null
+++ b/arch/arm/kvm/reset.c
@@ -0,0 +1,74 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18#include <linux/compiler.h>
19#include <linux/errno.h>
20#include <linux/sched.h>
21#include <linux/kvm_host.h>
22#include <linux/kvm.h>
23
24#include <asm/unified.h>
25#include <asm/ptrace.h>
26#include <asm/cputype.h>
27#include <asm/kvm_arm.h>
28#include <asm/kvm_coproc.h>
29
30/******************************************************************************
31 * Cortex-A15 Reset Values
32 */
33
34static const int a15_max_cpu_idx = 3;
35
36static struct kvm_regs a15_regs_reset = {
37 .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
38};
39
40
41/*******************************************************************************
42 * Exported reset function
43 */
44
45/**
46 * kvm_reset_vcpu - sets core registers and cp15 registers to reset value
47 * @vcpu: The VCPU pointer
48 *
49 * This function finds the right table above and sets the registers on the
 50 * virtual CPU struct to their architecturally defined reset values.
51 */
52int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
53{
54 struct kvm_regs *cpu_reset;
55
56 switch (vcpu->arch.target) {
57 case KVM_ARM_TARGET_CORTEX_A15:
58 if (vcpu->vcpu_id > a15_max_cpu_idx)
59 return -EINVAL;
60 cpu_reset = &a15_regs_reset;
61 vcpu->arch.midr = read_cpuid_id();
62 break;
63 default:
64 return -ENODEV;
65 }
66
67 /* Reset core registers */
68 memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs));
69
70 /* Reset CP15 registers */
71 kvm_reset_coprocs(vcpu);
72
73 return 0;
74}
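
The reset CPSR above drops the VCPU into SVC mode with asynchronous aborts, IRQs and FIQs all masked, which is the state a real core wakes up in. A quick standalone check of the composed value; the bit definitions mirror the architectural ones from asm/ptrace.h.

#include <stdio.h>

#define SVC_MODE  0x13	/* supervisor mode */
#define PSR_F_BIT 0x40	/* FIQ masked */
#define PSR_I_BIT 0x80	/* IRQ masked */
#define PSR_A_BIT 0x100	/* asynchronous abort masked */

int main(void)
{
	unsigned long cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT;

	printf("reset CPSR = %#lx\n", cpsr);	/* prints 0x1d3 */
	return 0;
}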
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
new file mode 100644
index 000000000000..a8e73ed5ad5b
--- /dev/null
+++ b/arch/arm/kvm/trace.h
@@ -0,0 +1,235 @@
1#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_KVM_H
3
4#include <linux/tracepoint.h>
5
6#undef TRACE_SYSTEM
7#define TRACE_SYSTEM kvm
8
9/*
10 * Tracepoints for entry/exit to guest
11 */
12TRACE_EVENT(kvm_entry,
13 TP_PROTO(unsigned long vcpu_pc),
14 TP_ARGS(vcpu_pc),
15
16 TP_STRUCT__entry(
17 __field( unsigned long, vcpu_pc )
18 ),
19
20 TP_fast_assign(
21 __entry->vcpu_pc = vcpu_pc;
22 ),
23
24 TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
25);
26
27TRACE_EVENT(kvm_exit,
28 TP_PROTO(unsigned long vcpu_pc),
29 TP_ARGS(vcpu_pc),
30
31 TP_STRUCT__entry(
32 __field( unsigned long, vcpu_pc )
33 ),
34
35 TP_fast_assign(
36 __entry->vcpu_pc = vcpu_pc;
37 ),
38
39 TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
40);
41
42TRACE_EVENT(kvm_guest_fault,
43 TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
44 unsigned long hxfar,
45 unsigned long long ipa),
46 TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
47
48 TP_STRUCT__entry(
49 __field( unsigned long, vcpu_pc )
50 __field( unsigned long, hsr )
51 __field( unsigned long, hxfar )
52 __field( unsigned long long, ipa )
53 ),
54
55 TP_fast_assign(
56 __entry->vcpu_pc = vcpu_pc;
57 __entry->hsr = hsr;
58 __entry->hxfar = hxfar;
59 __entry->ipa = ipa;
60 ),
61
62 TP_printk("guest fault at PC %#08lx (hxfar %#08lx, "
63 "ipa %#16llx, hsr %#08lx",
64 __entry->vcpu_pc, __entry->hxfar,
65 __entry->ipa, __entry->hsr)
66);
67
68TRACE_EVENT(kvm_irq_line,
69 TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
70 TP_ARGS(type, vcpu_idx, irq_num, level),
71
72 TP_STRUCT__entry(
73 __field( unsigned int, type )
74 __field( int, vcpu_idx )
75 __field( int, irq_num )
76 __field( int, level )
77 ),
78
79 TP_fast_assign(
80 __entry->type = type;
81 __entry->vcpu_idx = vcpu_idx;
82 __entry->irq_num = irq_num;
83 __entry->level = level;
84 ),
85
86 TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d",
87 (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" :
88 (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" :
89 (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN",
90 __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level)
91);
92
93TRACE_EVENT(kvm_mmio_emulate,
94 TP_PROTO(unsigned long vcpu_pc, unsigned long instr,
95 unsigned long cpsr),
96 TP_ARGS(vcpu_pc, instr, cpsr),
97
98 TP_STRUCT__entry(
99 __field( unsigned long, vcpu_pc )
100 __field( unsigned long, instr )
101 __field( unsigned long, cpsr )
102 ),
103
104 TP_fast_assign(
105 __entry->vcpu_pc = vcpu_pc;
106 __entry->instr = instr;
107 __entry->cpsr = cpsr;
108 ),
109
110 TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
111 __entry->vcpu_pc, __entry->instr, __entry->cpsr)
112);
113
114/* Architecturally implementation defined CP15 register access */
115TRACE_EVENT(kvm_emulate_cp15_imp,
116 TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
117 unsigned long CRm, unsigned long Op2, bool is_write),
118 TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write),
119
120 TP_STRUCT__entry(
121 __field( unsigned int, Op1 )
122 __field( unsigned int, Rt1 )
123 __field( unsigned int, CRn )
124 __field( unsigned int, CRm )
125 __field( unsigned int, Op2 )
126 __field( bool, is_write )
127 ),
128
129 TP_fast_assign(
130 __entry->is_write = is_write;
131 __entry->Op1 = Op1;
132 __entry->Rt1 = Rt1;
133 __entry->CRn = CRn;
134 __entry->CRm = CRm;
135 __entry->Op2 = Op2;
136 ),
137
138 TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u",
139 (__entry->is_write) ? "mcr" : "mrc",
140 __entry->Op1, __entry->Rt1, __entry->CRn,
141 __entry->CRm, __entry->Op2)
142);
143
144TRACE_EVENT(kvm_wfi,
145 TP_PROTO(unsigned long vcpu_pc),
146 TP_ARGS(vcpu_pc),
147
148 TP_STRUCT__entry(
149 __field( unsigned long, vcpu_pc )
150 ),
151
152 TP_fast_assign(
153 __entry->vcpu_pc = vcpu_pc;
154 ),
155
156 TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc)
157);
158
159TRACE_EVENT(kvm_unmap_hva,
160 TP_PROTO(unsigned long hva),
161 TP_ARGS(hva),
162
163 TP_STRUCT__entry(
164 __field( unsigned long, hva )
165 ),
166
167 TP_fast_assign(
168 __entry->hva = hva;
169 ),
170
171 TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
172);
173
174TRACE_EVENT(kvm_unmap_hva_range,
175 TP_PROTO(unsigned long start, unsigned long end),
176 TP_ARGS(start, end),
177
178 TP_STRUCT__entry(
179 __field( unsigned long, start )
180 __field( unsigned long, end )
181 ),
182
183 TP_fast_assign(
184 __entry->start = start;
185 __entry->end = end;
186 ),
187
188 TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
189 __entry->start, __entry->end)
190);
191
192TRACE_EVENT(kvm_set_spte_hva,
193 TP_PROTO(unsigned long hva),
194 TP_ARGS(hva),
195
196 TP_STRUCT__entry(
197 __field( unsigned long, hva )
198 ),
199
200 TP_fast_assign(
201 __entry->hva = hva;
202 ),
203
204 TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
205);
206
207TRACE_EVENT(kvm_hvc,
208 TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
209 TP_ARGS(vcpu_pc, r0, imm),
210
211 TP_STRUCT__entry(
212 __field( unsigned long, vcpu_pc )
213 __field( unsigned long, r0 )
214 __field( unsigned long, imm )
215 ),
216
217 TP_fast_assign(
218 __entry->vcpu_pc = vcpu_pc;
219 __entry->r0 = r0;
220 __entry->imm = imm;
221 ),
222
223 TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx",
224 __entry->vcpu_pc, __entry->r0, __entry->imm)
225);
226
227#endif /* _TRACE_KVM_H */
228
229#undef TRACE_INCLUDE_PATH
230#define TRACE_INCLUDE_PATH arch/arm/kvm
231#undef TRACE_INCLUDE_FILE
232#define TRACE_INCLUDE_FILE trace
233
234/* This part must be outside protection */
235#include <trace/define_trace.h>
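
Each TRACE_EVENT(name, ...) above expands into a trace_name() static inline that the arch code calls at the instrumented point; ftrace and perf then render the event with its TP_printk() format. A sketch of the call side follows; the surrounding function is invented for illustration, only the trace_kvm_*() helpers are generated from this header.

#include "trace.h"	/* pulls in the generated trace_kvm_*() helpers */

/* invented example: fire the entry/exit pair around a guest run */
static void example_run_guest(struct kvm_vcpu *vcpu)
{
	trace_kvm_entry(*vcpu_pc(vcpu));	/* logs "PC: 0x..." */
	/* ... world switch into the guest ... */
	trace_kvm_exit(*vcpu_pc(vcpu));
}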
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 9107691adbdb..5ac9e9384b15 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -25,35 +25,44 @@

 #define DAVINCI_CPUIDLE_MAX_STATES	2

-struct davinci_ops {
-	void (*enter) (u32 flags);
-	void (*exit) (u32 flags);
-	u32 flags;
-};
+static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
+static void __iomem *ddr2_reg_base;
+static bool ddr2_pdown;
+
+static void davinci_save_ddr_power(int enter, bool pdown)
+{
+	u32 val;
+
+	val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);
+
+	if (enter) {
+		if (pdown)
+			val |= DDR2_SRPD_BIT;
+		else
+			val &= ~DDR2_SRPD_BIT;
+		val |= DDR2_LPMODEN_BIT;
+	} else {
+		val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
+	}
+
+	__raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
+}

 /* Actual code that puts the SoC in different idle states */
 static int davinci_enter_idle(struct cpuidle_device *dev,
 				struct cpuidle_driver *drv,
 				int index)
 {
-	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
-	struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
-
-	if (ops && ops->enter)
-		ops->enter(ops->flags);
+	davinci_save_ddr_power(1, ddr2_pdown);

 	index = cpuidle_wrap_enter(dev, drv, index,
 				arm_cpuidle_simple_enter);

-	if (ops && ops->exit)
-		ops->exit(ops->flags);
+	davinci_save_ddr_power(0, ddr2_pdown);

 	return index;
 }

-/* fields in davinci_ops.flags */
-#define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN	BIT(0)
-
 static struct cpuidle_driver davinci_idle_driver = {
 	.name			= "cpuidle-davinci",
 	.owner			= THIS_MODULE,
@@ -70,45 +79,6 @@ static struct cpuidle_driver davinci_idle_driver = {
 	.state_count		= DAVINCI_CPUIDLE_MAX_STATES,
 };

-static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
-static void __iomem *ddr2_reg_base;
-
-static void davinci_save_ddr_power(int enter, bool pdown)
-{
-	u32 val;
-
-	val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);
-
-	if (enter) {
-		if (pdown)
-			val |= DDR2_SRPD_BIT;
-		else
-			val &= ~DDR2_SRPD_BIT;
-		val |= DDR2_LPMODEN_BIT;
-	} else {
-		val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
-	}
-
-	__raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
-}
-
-static void davinci_c2state_enter(u32 flags)
-{
-	davinci_save_ddr_power(1, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
-}
-
-static void davinci_c2state_exit(u32 flags)
-{
-	davinci_save_ddr_power(0, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
-}
-
-static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
-	[1] = {
-		.enter	= davinci_c2state_enter,
-		.exit	= davinci_c2state_exit,
-	},
-};
-
 static int __init davinci_cpuidle_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -124,11 +94,7 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)

 	ddr2_reg_base = pdata->ddr2_ctlr_base;

-	if (pdata->ddr2_pdown)
-		davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
-	cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
-
-	device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
+	ddr2_pdown = pdata->ddr2_pdown;

 	ret = cpuidle_register_driver(&davinci_idle_driver);
 	if (ret) {
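
The DDR2 hook that survives the cleanup is a single read-modify-write on the memory controller's SDRCR register. Below is a user-space model of that sequence; the bit positions are placeholders standing in for the real TI register layout, and the global stands in for the MMIO register.

#include <stdio.h>
#include <stdbool.h>

#define DDR2_SRPD_BIT    (1u << 23)	/* placeholder bit positions */
#define DDR2_LPMODEN_BIT (1u << 31)

static unsigned sdrcr = 0x00000400u;	/* stands in for the mmio register */

static void save_ddr_power(int enter, bool pdown)
{
	unsigned val = sdrcr;			/* __raw_readl() */

	if (enter) {
		if (pdown)
			val |= DDR2_SRPD_BIT;	/* self-refresh power-down */
		else
			val &= ~DDR2_SRPD_BIT;
		val |= DDR2_LPMODEN_BIT;	/* enable low-power mode */
	} else {
		val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
	}

	sdrcr = val;				/* __raw_writel() */
}

int main(void)
{
	save_ddr_power(1, true);
	printf("idle:  SDRCR = %#010x\n", sdrcr);
	save_ddr_power(0, true);
	printf("awake: SDRCR = %#010x\n", sdrcr);
	return 0;
}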
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index e103c290bc9e..85afb031b676 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT
 	select CPU_EXYNOS4210
 	select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD
 	select PINCTRL
-	select PINCTRL_EXYNOS4
+	select PINCTRL_EXYNOS
 	select USE_OF
 	help
 	  Machine support for Samsung Exynos4 machine with device tree enabled.
diff --git a/arch/arm/mach-exynos/include/mach/cpufreq.h b/arch/arm/mach-exynos/include/mach/cpufreq.h
index 7517c3f417af..b5d39dd03b2a 100644
--- a/arch/arm/mach-exynos/include/mach/cpufreq.h
+++ b/arch/arm/mach-exynos/include/mach/cpufreq.h
@@ -18,12 +18,25 @@ enum cpufreq_level_index {
 	L20,
 };

+#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
+	{ \
+		.freq = (f) * 1000, \
+		.clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \
+			(a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \
+		.clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \
+		.mps = ((m) << 16 | (p) << 8 | (s)), \
+	}
+
+struct apll_freq {
+	unsigned int freq;
+	u32 clk_div_cpu0;
+	u32 clk_div_cpu1;
+	u32 mps;
+};
+
 struct exynos_dvfs_info {
 	unsigned long mpll_freq_khz;
 	unsigned int pll_safe_idx;
-	unsigned int pm_lock_idx;
-	unsigned int max_support_idx;
-	unsigned int min_support_idx;
 	struct clk *cpu_clk;
 	unsigned int *volt_table;
 	struct cpufreq_frequency_table *freq_table;
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index e99d3d8f2bcf..ea9e3020972d 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -104,6 +104,12 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = {
 	OF_DEV_AUXDATA("samsung,mfc-v6", 0x11000000, "s5p-mfc-v6", NULL),
 	OF_DEV_AUXDATA("samsung,exynos5250-tmu", 0x10060000,
 			"exynos-tmu", NULL),
+	OF_DEV_AUXDATA("samsung,i2s-v5", 0x03830000,
+			"samsung-i2s.0", NULL),
+	OF_DEV_AUXDATA("samsung,i2s-v5", 0x12D60000,
+			"samsung-i2s.1", NULL),
+	OF_DEV_AUXDATA("samsung,i2s-v5", 0x12D70000,
+			"samsung-i2s.2", NULL),
 	{},
 };

diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index 551c97e87a78..44b12f9c1584 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -1,5 +1,7 @@
1config ARCH_HIGHBANK 1config ARCH_HIGHBANK
2 bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7 2 bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7
3 select ARCH_HAS_CPUFREQ
4 select ARCH_HAS_OPP
3 select ARCH_WANT_OPTIONAL_GPIOLIB 5 select ARCH_WANT_OPTIONAL_GPIOLIB
4 select ARM_AMBA 6 select ARM_AMBA
5 select ARM_GIC 7 select ARM_GIC
@@ -11,5 +13,7 @@ config ARCH_HIGHBANK
11 select GENERIC_CLOCKEVENTS 13 select GENERIC_CLOCKEVENTS
12 select HAVE_ARM_SCU 14 select HAVE_ARM_SCU
13 select HAVE_SMP 15 select HAVE_SMP
16 select MAILBOX
17 select PL320_MBOX
14 select SPARSE_IRQ 18 select SPARSE_IRQ
15 select USE_OF 19 select USE_OF
diff --git a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h
index 80235b46cb58..3f65206a9b92 100644
--- a/arch/arm/mach-highbank/core.h
+++ b/arch/arm/mach-highbank/core.h
@@ -2,7 +2,6 @@
2#define __HIGHBANK_CORE_H 2#define __HIGHBANK_CORE_H
3 3
4extern void highbank_set_cpu_jump(int cpu, void *jump_addr); 4extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
5extern void highbank_clocks_init(void);
6extern void highbank_restart(char, const char *); 5extern void highbank_restart(char, const char *);
7extern void __iomem *scu_base_addr; 6extern void __iomem *scu_base_addr;
8 7
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 981dc1e1da51..65656ff0eb33 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -25,9 +25,11 @@
 #include <linux/of_address.h>
 #include <linux/smp.h>
 #include <linux/amba/bus.h>
+#include <linux/clk-provider.h>

 #include <asm/arch_timer.h>
 #include <asm/cacheflush.h>
+#include <asm/cputype.h>
 #include <asm/smp_plat.h>
 #include <asm/smp_twd.h>
 #include <asm/hardware/arm_timer.h>
@@ -59,7 +61,7 @@ static void __init highbank_scu_map_io(void)

 void highbank_set_cpu_jump(int cpu, void *jump_addr)
 {
-	cpu = cpu_logical_map(cpu);
+	cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
 	writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu));
 	__cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
 	outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
@@ -116,7 +118,7 @@ static void __init highbank_timer_init(void)
 	WARN_ON(!timer_base);
 	irq = irq_of_parse_and_map(np, 0);

-	highbank_clocks_init();
+	of_clk_init(NULL);
 	lookup.clk = of_clk_get(np, 0);
 	clkdev_add(&lookup);

diff --git a/arch/arm/mach-highbank/sysregs.h b/arch/arm/mach-highbank/sysregs.h
index 70af9d13fcef..5995df7f2622 100644
--- a/arch/arm/mach-highbank/sysregs.h
+++ b/arch/arm/mach-highbank/sysregs.h
@@ -37,7 +37,7 @@ extern void __iomem *sregs_base;

 static inline void highbank_set_core_pwr(void)
 {
-	int cpu = cpu_logical_map(smp_processor_id());
+	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
 	if (scu_base_addr)
 		scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
 	else
@@ -46,7 +46,7 @@ static inline void highbank_set_core_pwr(void)

 static inline void highbank_clear_core_pwr(void)
 {
-	int cpu = cpu_logical_map(smp_processor_id());
+	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
 	if (scu_base_addr)
 		scu_power_mode(scu_base_addr, SCU_PM_NORMAL);
 	else
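
On Highbank the raw MPIDR is no longer usable as a dense CPU index once affinity fields come into play, so the code above extracts affinity level 0 instead. A standalone illustration of the extraction; the macro body mirrors MPIDR_AFFINITY_LEVEL() from asm/cputype.h (8 bits per level), and the sample MPIDR value is made up.

#include <stdio.h>

#define MPIDR_LEVEL_BITS	8
#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> (MPIDR_LEVEL_BITS * (level))) & MPIDR_LEVEL_MASK)

int main(void)
{
	unsigned long mpidr = 0x80000101UL;	/* made-up: Aff1=1, Aff0=1 */

	printf("Aff0 = %lu, Aff1 = %lu\n",
	       MPIDR_AFFINITY_LEVEL(mpidr, 0),
	       MPIDR_AFFINITY_LEVEL(mpidr, 1));
	return 0;
}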
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 4815ea6f8f5d..1337f2c51f9e 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -27,6 +27,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
+#include <linux/usb/phy.h>

 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -263,6 +264,7 @@ static void __init omap_2430sdp_init(void)
 	omap_hsmmc_init(mmc);

 	omap_mux_init_signal("usb0hs_stp", OMAP_PULL_ENA | OMAP_PULL_UP);
+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(NULL);

 	board_smc91x_init();
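
The same one-line usb_bind_phy() call recurs in each OMAP board file below: it records, before the controller probes, which PHY the named MUSB instance should grab. The declaration lives in <linux/usb/phy.h>; the signature below is quoted from memory and should be treated as approximate.

/* binds PHY index 0 of controller "musb-hdrc.0.auto" to a named PHY */
extern int usb_bind_phy(const char *dev_name, u8 index,
			const char *phy_dev_name);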
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index bb73afc9ac17..8a2e242910eb 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -25,6 +25,7 @@
 #include <linux/gpio.h>
 #include <linux/mmc/host.h>
 #include <linux/platform_data/spi-omap2-mcspi.h>
+#include <linux/usb/phy.h>

 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -579,6 +580,7 @@ static void __init omap_3430sdp_init(void)
 	omap_ads7846_init(1, gpio_pendown, 310, NULL);
 	omap_serial_init();
 	omap_sdrc_init(hyb18m512160af6_sdrc_params, NULL);
+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(NULL);
 	board_smc91x_init();
 	board_flash_init(sdp_flash_partitions, chip_sel_3430, 0);
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 1cc6696594fd..8e8efccf762f 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -28,6 +28,7 @@
 #include <linux/leds_pwm.h>
 #include <linux/platform_data/omap4-keypad.h>
 #include <linux/usb/musb.h>
+#include <linux/usb/phy.h>

 #include <asm/hardware/gic.h>
 #include <asm/mach-types.h>
@@ -696,6 +697,7 @@ static void __init omap_4430sdp_init(void)
 	omap4_sdp4430_wifi_init();
 	omap4_twl6030_hsmmc_init(mmc);

+	usb_bind_phy("musb-hdrc.0.auto", 0, "omap-usb2.1.auto");
 	usb_musb_init(&musb_board_data);

 	status = omap_ethernet_init();
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index b3102c2f4a3c..f1172f2f1a7e 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -30,6 +30,7 @@
 #include <linux/regulator/fixed.h>
 #include <linux/regulator/machine.h>
 #include <linux/mmc/host.h>
+#include <linux/usb/phy.h>

 #include <linux/spi/spi.h>
 #include <linux/spi/tdo24m.h>
@@ -724,6 +725,7 @@ static void __init cm_t3x_common_init(void)
 	cm_t35_init_display();
 	omap_twl4030_audio_init("cm-t3x");

+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(NULL);
 	cm_t35_init_usbh();
 	cm_t35_init_camera();
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 12865af25d3a..77cade52b022 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -29,6 +29,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/nand.h>
 #include <linux/mmc/host.h>
+#include <linux/usb/phy.h>

 #include <linux/regulator/machine.h>
 #include <linux/i2c/twl.h>
@@ -622,6 +623,7 @@ static void __init devkit8000_init(void)

 	omap_ads7846_init(2, OMAP3_DEVKIT_TS_GPIO, 0, NULL);

+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(NULL);
 	usbhs_init(&usbhs_bdata);
 	board_nand_init(devkit8000_nand_partitions,
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 0f24cb84ba5a..15e58815a131 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -18,6 +18,7 @@
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/input.h>
+#include <linux/usb/phy.h>

 #include <linux/regulator/machine.h>
 #include <linux/regulator/fixed.h>
@@ -625,6 +626,7 @@ static void __init igep_init(void)
 	omap_serial_init();
 	omap_sdrc_init(m65kxxxxam_sdrc_params,
 		       m65kxxxxam_sdrc_params);
+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(NULL);

 	igep_flash_init();
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 0869f4f3d3e1..3b5510a433f0 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -28,6 +28,7 @@
 #include <linux/io.h>
 #include <linux/smsc911x.h>
 #include <linux/mmc/host.h>
+#include <linux/usb/phy.h>
 #include <linux/platform_data/spi-omap2-mcspi.h>

 #include <asm/mach-types.h>
@@ -418,6 +419,7 @@ static void __init omap_ldp_init(void)
 	omap_ads7846_init(1, 54, 310, NULL);
 	omap_serial_init();
 	omap_sdrc_init(NULL, NULL);
+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(NULL);
 	board_nand_init(ldp_nand_partitions, ARRAY_SIZE(ldp_nand_partitions),
 		ZOOM_NAND_CS, 0, nand_default_timings);
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 22c483d5dfa8..4616f9269d00 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -30,6 +30,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/nand.h>
 #include <linux/mmc/host.h>
+#include <linux/usb/phy.h>

 #include <linux/regulator/machine.h>
 #include <linux/i2c/twl.h>
@@ -519,6 +520,7 @@ static void __init omap3_beagle_init(void)
 	omap_sdrc_init(mt46h32m32lf6_sdrc_params,
 				  mt46h32m32lf6_sdrc_params);

+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(NULL);
 	usbhs_init(&usbhs_bdata);
 	board_nand_init(omap3beagle_nand_partitions,
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 3985f35aee06..202389503457 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -41,6 +41,7 @@
 #include <linux/regulator/machine.h>
 #include <linux/mmc/host.h>
 #include <linux/export.h>
+#include <linux/usb/phy.h>

 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -309,7 +310,7 @@ static struct omap2_hsmmc_info mmc[] = {
 		.gpio_wp	= 63,
 		.deferred	= true,
 	},
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
 	{
 		.name		= "wl1271",
 		.mmc		= 2,
@@ -450,7 +451,7 @@ static struct regulator_init_data omap3evm_vio = {
 	.consumer_supplies	= omap3evm_vio_supply,
 };

-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA

 #define OMAP3EVM_WLAN_PMENA_GPIO	(150)
 #define OMAP3EVM_WLAN_IRQ_GPIO		(149)
@@ -563,7 +564,7 @@ static struct omap_board_mux omap35x_board_mux[] __initdata = {
 				OMAP_PIN_OFF_NONE),
 	OMAP3_MUX(GPMC_WAIT2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
 				OMAP_PIN_OFF_NONE),
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
 	/* WLAN IRQ - GPIO 149 */
 	OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),

@@ -601,7 +602,7 @@ static struct omap_board_mux omap36x_board_mux[] __initdata = {
 	OMAP3_MUX(SYS_BOOT4, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
 	OMAP3_MUX(SYS_BOOT5, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
 	OMAP3_MUX(SYS_BOOT6, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
 	/* WLAN IRQ - GPIO 149 */
 	OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),

@@ -637,7 +638,7 @@ static struct gpio omap3_evm_ehci_gpios[] __initdata = {

 static void __init omap3_evm_wl12xx_init(void)
 {
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
+#ifdef CONFIG_WILINK_PLATFORM_DATA
 	int ret;

 	/* WL12xx WLAN Init */
@@ -734,6 +735,7 @@ static void __init omap3_evm_init(void)
 		omap_mux_init_gpio(135, OMAP_PIN_OUTPUT);
 		usbhs_bdata.reset_gpio_port[1] = 135;
 	}
+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(&musb_board_data);
 	usbhs_init(&usbhs_bdata);
 	board_nand_init(omap3evm_nand_partitions,
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
index 2a065ba6eb58..9409eb897e2f 100644
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -29,6 +29,7 @@

 #include <linux/i2c/twl.h>
 #include <linux/mmc/host.h>
+#include <linux/usb/phy.h>

 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -215,6 +216,7 @@ static void __init omap3logic_init(void)
 	board_mmc_init();
 	board_smsc911x_init();

+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(NULL);

 	/* Ensure SDRC pins are mux'd for self-refresh */
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index a53a6683c1b8..1ac3e81969e0 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -35,6 +35,7 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/regulator/fixed.h>
+#include <linux/usb/phy.h>
 #include <linux/platform_data/spi-omap2-mcspi.h>

 #include <asm/mach-types.h>
@@ -601,6 +602,7 @@ static void __init omap3pandora_init(void)
 			ARRAY_SIZE(omap3pandora_spi_board_info));
 	omap_ads7846_init(1, OMAP3_PANDORA_TS_GPIO, 0, NULL);
 	usbhs_init(&usbhs_bdata);
+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(NULL);
 	gpmc_nand_init(&pandora_nand_data, NULL);

diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index 53a6cbcf9747..63cb204e0811 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -33,6 +33,7 @@
 #include <linux/interrupt.h>
 #include <linux/smsc911x.h>
 #include <linux/i2c/at24.h>
+#include <linux/usb/phy.h>

 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -404,6 +405,7 @@ static void __init omap3_stalker_init(void)

 	omap_serial_init();
 	omap_sdrc_init(mt46h32m32lf6_sdrc_params, NULL);
+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(NULL);
 	usbhs_init(&usbhs_bdata);
 	omap_ads7846_init(1, OMAP3_STALKER_TS_GPIO, 310, NULL);
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 263cb9cfbf37..6b22ce3581d7 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -28,6 +28,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/nand.h>
 #include <linux/mmc/host.h>
+#include <linux/usb/phy.h>

 #include <linux/platform_data/spi-omap2-mcspi.h>
 #include <linux/spi/spi.h>
@@ -365,6 +366,7 @@ static void __init omap3_touchbook_init(void)

 	/* Touchscreen and accelerometer */
 	omap_ads7846_init(4, OMAP3_TS_GPIO, 310, &ads7846_pdata);
+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(NULL);
 	usbhs_init(&usbhs_bdata);
 	board_nand_init(omap3touchbook_nand_partitions,
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 769c1feee1c4..40184cc494f9 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -30,6 +30,7 @@
 #include <linux/regulator/fixed.h>
 #include <linux/ti_wilink_st.h>
 #include <linux/usb/musb.h>
+#include <linux/usb/phy.h>
 #include <linux/wl12xx.h>
 #include <linux/platform_data/omap-abe-twl6040.h>

@@ -447,6 +448,7 @@ static void __init omap4_panda_init(void)
 	omap_sdrc_init(NULL, NULL);
 	omap4_twl6030_hsmmc_init(mmc);
 	omap4_ehci_init();
+	usb_bind_phy("musb-hdrc.0.auto", 0, "omap-usb2.1.auto");
 	usb_musb_init(&musb_board_data);
 	omap4_panda_display_init();
 }
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index c8fde3e56441..7e43ff3f704c 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -36,6 +36,7 @@
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mmc/host.h>
+#include <linux/usb/phy.h>

 #include <linux/platform_data/mtd-nand-omap2.h>
 #include <linux/platform_data/spi-omap2-mcspi.h>
@@ -499,6 +500,7 @@ static void __init overo_init(void)
 			mt46h32m32lf6_sdrc_params);
 	board_nand_init(overo_nand_partitions,
 			ARRAY_SIZE(overo_nand_partitions), NAND_CS, 0, NULL);
+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(NULL);
 	usbhs_init(&usbhs_bdata);
 	overo_spi_init();
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
index 0c777b75e484..f8a272c253f5 100644
--- a/arch/arm/mach-omap2/board-rm680.c
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -18,6 +18,7 @@
 #include <linux/regulator/machine.h>
 #include <linux/regulator/consumer.h>
 #include <linux/platform_data/mtd-onenand-omap2.h>
+#include <linux/usb/phy.h>

 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
@@ -134,6 +135,7 @@ static void __init rm680_init(void)
 	sdrc_params = nokia_get_sdram_timings();
 	omap_sdrc_init(sdrc_params, sdrc_params);

+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(NULL);
 	rm680_peripherals_init();
 }
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index cf07e289b4ea..f3d075baebb6 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -42,7 +42,7 @@
 #include <media/si4713.h>
 #include <linux/leds-lp5523.h>

-#include <../drivers/staging/iio/light/tsl2563.h>
+#include <linux/platform_data/tsl2563.h>
 #include <linux/lis3lv02d.h>

 #if defined(CONFIG_IR_RX51) || defined(CONFIG_IR_RX51_MODULE)
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 26e07addc9d7..dc5498b1b3a7 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -20,6 +20,7 @@
 #include <linux/wl12xx.h>
 #include <linux/mmc/host.h>
 #include <linux/platform_data/gpio-omap.h>
+#include <linux/usb/phy.h>

 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -298,6 +299,7 @@ void __init zoom_peripherals_init(void)
 	omap_hsmmc_init(mmc);
 	omap_i2c_init();
 	platform_device_register(&omap_vwlan_device);
+	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
 	usb_musb_init(NULL);
 	enable_board_wakeup_source();
 	omap_serial_init();
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 626f3ea3142f..b6cc233214d7 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -20,6 +20,7 @@
 #include <linux/pinctrl/machine.h>
 #include <linux/platform_data/omap4-keypad.h>
 #include <linux/platform_data/omap_ocp2scp.h>
+#include <linux/usb/omap_control_usb.h>

 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
@@ -254,6 +255,49 @@ static inline void omap_init_camera(void)
 #endif
 }

+#if IS_ENABLED(CONFIG_OMAP_CONTROL_USB)
+static struct omap_control_usb_platform_data omap4_control_usb_pdata = {
+	.type = 1,
+};
+
+struct resource omap4_control_usb_res[] = {
+	{
+		.name	= "control_dev_conf",
+		.start	= 0x4a002300,
+		.end	= 0x4a002303,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.name	= "otghs_control",
+		.start	= 0x4a00233c,
+		.end	= 0x4a00233f,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device omap4_control_usb = {
+	.name = "omap-control-usb",
+	.id = -1,
+	.dev = {
+		.platform_data = &omap4_control_usb_pdata,
+	},
+	.num_resources = 2,
+	.resource = omap4_control_usb_res,
+};
+
+static inline void __init omap_init_control_usb(void)
+{
+	if (!cpu_is_omap44xx())
+		return;
+
+	if (platform_device_register(&omap4_control_usb))
+		pr_err("Error registering omap_control_usb device\n");
+}
+
+#else
+static inline void omap_init_control_usb(void) { }
+#endif /* CONFIG_OMAP_CONTROL_USB */
+
 int __init omap4_keyboard_init(struct omap4_keypad_platform_data
 			*sdp4430_keypad_data, struct omap_board_data *bdata)
 {
@@ -721,6 +765,7 @@ static int __init omap2_init_devices(void)
 	omap_init_mbox();
 	/* If dtb is there, the devices will be created dynamically */
 	if (!of_have_populated_dt()) {
+		omap_init_control_usb();
 		omap_init_dmic();
 		omap_init_mcpdm();
 		omap_init_mcspi();
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 8033cb747c86..64bac53da0e8 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1134,11 +1134,9 @@ static int gpmc_probe(struct platform_device *pdev)
 	phys_base = res->start;
 	mem_size = resource_size(res);

-	gpmc_base = devm_request_and_ioremap(&pdev->dev, res);
-	if (!gpmc_base) {
-		dev_err(&pdev->dev, "error: request memory / ioremap\n");
-		return -EADDRNOTAVAIL;
-	}
+	gpmc_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(gpmc_base))
+		return PTR_ERR(gpmc_base);

 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (res == NULL)
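
devm_ioremap_resource() collapses the request_mem_region()/ioremap() pair and its error reporting into one managed call that returns an ERR_PTR() code, so probe functions shrink to the pattern below. This is a generic sketch of the idiom, not code from this driver.

/* generic probe fragment showing the devm_ioremap_resource() idiom */
static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* the helper already logged why */

	/* ... use base ... */
	return 0;
}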
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 793f54ac7d14..624a7e84a685 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2702,13 +2702,6 @@ static struct resource omap44xx_usb_phy_and_pll_addrs[] = {
 		.end		= 0x4a0ae000,
 		.flags		= IORESOURCE_MEM,
 	},
-	{
-		/* XXX: Remove this once control module driver is in place */
-		.name		= "ctrl_dev",
-		.start		= 0x4a002300,
-		.end		= 0x4a002303,
-		.flags		= IORESOURCE_MEM,
-	},
 	{ }
 };

@@ -6156,12 +6149,6 @@ static struct omap_hwmod_addr_space omap44xx_usb_otg_hs_addrs[] = {
 		.pa_end		= 0x4a0ab7ff,
 		.flags		= ADDR_TYPE_RT
 	},
-	{
-		/* XXX: Remove this once control module driver is in place */
-		.pa_start	= 0x4a00233c,
-		.pa_end		= 0x4a00233f,
-		.flags		= ADDR_TYPE_RT
-	},
 	{ }
 };

diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 7be3622cfc85..2d93d8b23835 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -351,12 +351,10 @@ static void omap3_pm_idle(void)
 	if (omap_irq_pending())
 		goto out;

-	trace_power_start(POWER_CSTATE, 1, smp_processor_id());
 	trace_cpu_idle(1, smp_processor_id());

 	omap_sram_idle();

-	trace_power_end(smp_processor_id());
 	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());

 out:
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 7b33b375fe77..9d27e3f8a09c 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -85,6 +85,9 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
 	musb_plat.mode = board_data->mode;
 	musb_plat.extvbus = board_data->extvbus;

+	if (cpu_is_omap44xx())
+		musb_plat.has_mailbox = true;
+
 	if (soc_is_am35xx()) {
 		oh_name = "am35x_otg_hs";
 		name = "musb-am35x";
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 616cb87b6179..69985b06c0da 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -53,17 +53,25 @@ static unsigned long ac97_reset_config[] = {
 	GPIO95_AC97_nRESET,
 };

-void pxa27x_assert_ac97reset(int reset_gpio, int on)
+void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio)
 {
+	/*
+	 * This helper function is used to work around a bug in the pxa27x's
+	 * ac97 controller during a warm reset.  The configuration of the
+	 * reset_gpio is changed as follows:
+	 * to_gpio == true: configured to generic output gpio and driven high
+	 * to_gpio == false: configured to ac97 controller alt fn AC97_nRESET
+	 */
+
 	if (reset_gpio == 113)
-		pxa2xx_mfp_config(on ? &ac97_reset_config[0] :
+		pxa2xx_mfp_config(to_gpio ? &ac97_reset_config[0] :
 				  &ac97_reset_config[1], 1);

 	if (reset_gpio == 95)
-		pxa2xx_mfp_config(on ? &ac97_reset_config[2] :
+		pxa2xx_mfp_config(to_gpio ? &ac97_reset_config[2] :
 				  &ac97_reset_config[3], 1);
 }
-EXPORT_SYMBOL_GPL(pxa27x_assert_ac97reset);
+EXPORT_SYMBOL_GPL(pxa27x_configure_ac97reset);

 /* Crystal clock: 13MHz */
 #define BASE_CLK	13000000
diff --git a/arch/arm/mach-realview/include/mach/irqs-eb.h b/arch/arm/mach-realview/include/mach/irqs-eb.h
index d6b5073692d2..44754230fdcc 100644
--- a/arch/arm/mach-realview/include/mach/irqs-eb.h
+++ b/arch/arm/mach-realview/include/mach/irqs-eb.h
@@ -115,7 +115,7 @@
115/* 115/*
116 * Only define NR_IRQS if less than NR_IRQS_EB 116 * Only define NR_IRQS if less than NR_IRQS_EB
117 */ 117 */
118#define NR_IRQS_EB (IRQ_EB_GIC_START + 96) 118#define NR_IRQS_EB (IRQ_EB_GIC_START + 128)
119 119
120#if defined(CONFIG_MACH_REALVIEW_EB) \ 120#if defined(CONFIG_MACH_REALVIEW_EB) \
121 && (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB)) 121 && (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB))
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index 25df14a9e268..3445c4f42347 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -291,8 +291,8 @@ config MACH_JIVE
 	  Say Y here if you are using the Logitech Jive.

 config MACH_JIVE_SHOW_BOOTLOADER
-	bool "Allow access to bootloader partitions in MTD (EXPERIMENTAL)"
-	depends on MACH_JIVE && EXPERIMENTAL
+	bool "Allow access to bootloader partitions in MTD"
+	depends on MACH_JIVE

 config MACH_S3C2413
 	bool
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 9a23739f7026..442497363db9 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -16,6 +16,7 @@
 #include <linux/ioport.h>
 #include <linux/platform_data/sa11x0-serial.h>
 #include <linux/serial_core.h>
+#include <linux/platform_device.h>
 #include <linux/mfd/ucb1x00.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 99ef190d0909..08294fa9e0d4 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -657,14 +657,8 @@ static struct platform_device lcdc_device = {
 /* FSI */
 #define IRQ_FSI evt2irq(0x1840)
 static struct sh_fsi_platform_info fsi_info = {
-	.port_a = {
-		.flags = SH_FSI_BRS_INV,
-	},
 	.port_b = {
-		.flags = SH_FSI_BRS_INV |
-			 SH_FSI_BRM_INV |
-			 SH_FSI_LRS_INV |
-			 SH_FSI_CLK_CPG |
+		.flags = SH_FSI_CLK_CPG |
 			 SH_FSI_FMT_SPDIF,
 	},
 };
@@ -692,21 +686,21 @@ static struct platform_device fsi_device = {
 	},
 };

-static struct asoc_simple_dai_init_info fsi2_ak4643_init_info = {
-	.fmt		= SND_SOC_DAIFMT_LEFT_J,
-	.codec_daifmt	= SND_SOC_DAIFMT_CBM_CFM,
-	.cpu_daifmt	= SND_SOC_DAIFMT_CBS_CFS,
-	.sysclk		= 11289600,
-};
-
 static struct asoc_simple_card_info fsi2_ak4643_info = {
 	.name		= "AK4643",
 	.card		= "FSI2A-AK4643",
-	.cpu_dai	= "fsia-dai",
 	.codec		= "ak4642-codec.0-0013",
 	.platform	= "sh_fsi2",
-	.codec_dai	= "ak4642-hifi",
-	.init		= &fsi2_ak4643_init_info,
+	.daifmt		= SND_SOC_DAIFMT_LEFT_J,
+	.cpu_dai = {
+		.name	= "fsia-dai",
+		.fmt	= SND_SOC_DAIFMT_CBS_CFS,
+	},
+	.codec_dai = {
+		.name	= "ak4642-hifi",
+		.fmt	= SND_SOC_DAIFMT_CBM_CFM,
+		.sysclk	= 11289600,
+	},
 };

 static struct platform_device fsi_ak4643_device = {
@@ -815,18 +809,18 @@ static struct platform_device lcdc1_device = {
 	},
 };

-static struct asoc_simple_dai_init_info fsi2_hdmi_init_info = {
-	.cpu_daifmt	= SND_SOC_DAIFMT_CBM_CFM,
-};
-
 static struct asoc_simple_card_info fsi2_hdmi_info = {
 	.name		= "HDMI",
 	.card		= "FSI2B-HDMI",
-	.cpu_dai	= "fsib-dai",
 	.codec		= "sh-mobile-hdmi",
 	.platform	= "sh_fsi2",
-	.codec_dai	= "sh_mobile_hdmi-hifi",
-	.init		= &fsi2_hdmi_init_info,
+	.cpu_dai = {
+		.name	= "fsib-dai",
+		.fmt	= SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_IB_NF,
+	},
+	.codec_dai = {
+		.name	= "sh_mobile_hdmi-hifi",
+	},
 };

 static struct platform_device fsi_hdmi_device = {
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 5353adf6b828..0679ca6bf1f6 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -806,21 +806,21 @@ static struct platform_device fsi_device = {
 };
 
 /* FSI-WM8978 */
-static struct asoc_simple_dai_init_info fsi_wm8978_init_info = {
-	.fmt		= SND_SOC_DAIFMT_I2S,
-	.codec_daifmt	= SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_NB_NF,
-	.cpu_daifmt	= SND_SOC_DAIFMT_CBS_CFS,
-	.sysclk		= 12288000,
-};
-
 static struct asoc_simple_card_info fsi_wm8978_info = {
 	.name		= "wm8978",
 	.card		= "FSI2A-WM8978",
-	.cpu_dai	= "fsia-dai",
 	.codec		= "wm8978.0-001a",
 	.platform	= "sh_fsi2",
-	.codec_dai	= "wm8978-hifi",
-	.init		= &fsi_wm8978_init_info,
+	.daifmt		= SND_SOC_DAIFMT_I2S,
+	.cpu_dai = {
+		.name	= "fsia-dai",
+		.fmt	= SND_SOC_DAIFMT_CBS_CFS | SND_SOC_DAIFMT_IB_NF,
+	},
+	.codec_dai = {
+		.name	= "wm8978-hifi",
+		.fmt	= SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_NB_NF,
+		.sysclk	= 12288000,
+	},
 };
 
 static struct platform_device fsi_wm8978_device = {
@@ -832,18 +832,18 @@ static struct platform_device fsi_wm8978_device = {
 };
 
 /* FSI-HDMI */
-static struct asoc_simple_dai_init_info fsi2_hdmi_init_info = {
-	.cpu_daifmt	= SND_SOC_DAIFMT_CBM_CFM,
-};
-
 static struct asoc_simple_card_info fsi2_hdmi_info = {
 	.name		= "HDMI",
 	.card		= "FSI2B-HDMI",
-	.cpu_dai	= "fsib-dai",
 	.codec		= "sh-mobile-hdmi",
 	.platform	= "sh_fsi2",
-	.codec_dai	= "sh_mobile_hdmi-hifi",
-	.init		= &fsi2_hdmi_init_info,
+	.cpu_dai = {
+		.name	= "fsib-dai",
+		.fmt	= SND_SOC_DAIFMT_CBM_CFM,
+	},
+	.codec_dai = {
+		.name	= "sh_mobile_hdmi-hifi",
+	},
 };
 
 static struct platform_device fsi_hdmi_device = {
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index c02448d6847f..f41b71e8df3e 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -525,21 +525,21 @@ static struct platform_device fsi_device = {
 	},
 };
 
-static struct asoc_simple_dai_init_info fsi2_ak4648_init_info = {
-	.fmt		= SND_SOC_DAIFMT_LEFT_J,
-	.codec_daifmt	= SND_SOC_DAIFMT_CBM_CFM,
-	.cpu_daifmt	= SND_SOC_DAIFMT_CBS_CFS,
-	.sysclk		= 11289600,
-};
-
 static struct asoc_simple_card_info fsi2_ak4648_info = {
 	.name		= "AK4648",
 	.card		= "FSI2A-AK4648",
-	.cpu_dai	= "fsia-dai",
 	.codec		= "ak4642-codec.0-0012",
 	.platform	= "sh_fsi2",
-	.codec_dai	= "ak4642-hifi",
-	.init		= &fsi2_ak4648_init_info,
+	.daifmt		= SND_SOC_DAIFMT_LEFT_J,
+	.cpu_dai = {
+		.name	= "fsia-dai",
+		.fmt	= SND_SOC_DAIFMT_CBS_CFS,
+	},
+	.codec_dai = {
+		.name	= "ak4642-hifi",
+		.fmt	= SND_SOC_DAIFMT_CBM_CFM,
+		.sysclk	= 11289600,
+	},
 };
 
 static struct platform_device fsi_ak4648_device = {
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 2fed62f66045..3fd716dae405 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -502,18 +502,18 @@ static struct platform_device hdmi_lcdc_device = {
 	},
 };
 
-static struct asoc_simple_dai_init_info fsi2_hdmi_init_info = {
-	.cpu_daifmt	= SND_SOC_DAIFMT_CBM_CFM,
-};
-
 static struct asoc_simple_card_info fsi2_hdmi_info = {
 	.name		= "HDMI",
 	.card		= "FSI2B-HDMI",
-	.cpu_dai	= "fsib-dai",
 	.codec		= "sh-mobile-hdmi",
 	.platform	= "sh_fsi2",
-	.codec_dai	= "sh_mobile_hdmi-hifi",
-	.init		= &fsi2_hdmi_init_info,
+	.cpu_dai = {
+		.name	= "fsib-dai",
+		.fmt	= SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_IB_NF,
+	},
+	.codec_dai = {
+		.name	= "sh_mobile_hdmi-hifi",
+	},
 };
 
 static struct platform_device fsi_hdmi_device = {
@@ -858,16 +858,12 @@ static struct platform_device leds_device = {
 #define IRQ_FSI evt2irq(0x1840)
 static struct sh_fsi_platform_info fsi_info = {
 	.port_a = {
-		.flags = SH_FSI_BRS_INV,
 		.tx_id = SHDMA_SLAVE_FSIA_TX,
 		.rx_id = SHDMA_SLAVE_FSIA_RX,
 	},
 	.port_b = {
-		.flags = SH_FSI_BRS_INV |
-			 SH_FSI_BRM_INV |
-			 SH_FSI_LRS_INV |
-			 SH_FSI_CLK_CPG |
-			 SH_FSI_FMT_SPDIF,
+		.flags = SH_FSI_CLK_CPG |
+			 SH_FSI_FMT_SPDIF,
 	}
 };
 
@@ -896,21 +892,21 @@ static struct platform_device fsi_device = {
 	},
 };
 
-static struct asoc_simple_dai_init_info fsi2_ak4643_init_info = {
-	.fmt		= SND_SOC_DAIFMT_LEFT_J,
-	.codec_daifmt	= SND_SOC_DAIFMT_CBM_CFM,
-	.cpu_daifmt	= SND_SOC_DAIFMT_CBS_CFS,
-	.sysclk		= 11289600,
-};
-
 static struct asoc_simple_card_info fsi2_ak4643_info = {
 	.name		= "AK4643",
 	.card		= "FSI2A-AK4643",
-	.cpu_dai	= "fsia-dai",
 	.codec		= "ak4642-codec.0-0013",
 	.platform	= "sh_fsi2",
-	.codec_dai	= "ak4642-hifi",
-	.init		= &fsi2_ak4643_init_info,
+	.daifmt		= SND_SOC_DAIFMT_LEFT_J,
+	.cpu_dai = {
+		.name	= "fsia-dai",
+		.fmt	= SND_SOC_DAIFMT_CBS_CFS,
+	},
+	.codec_dai = {
+		.name	= "ak4642-hifi",
+		.fmt	= SND_SOC_DAIFMT_CBM_CFM,
+		.sysclk	= 11289600,
+	},
 };
 
 static struct platform_device fsi_ak4643_device = {
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index 3fdd0085e306..8709a39bd34c 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -7,3 +7,4 @@ config ARCH_SUNXI
 	select PINCTRL
 	select SPARSE_IRQ
 	select SUNXI_TIMER
+	select PINCTRL_SUNXI
\ No newline at end of file
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
index a74d3c7d2e26..a36a03d3c9a0 100644
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ b/arch/arm/mach-tegra/cpu-tegra.c
@@ -243,8 +243,7 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
 	/* FIXME: what's the actual transition time? */
 	policy->cpuinfo.transition_latency = 300 * 1000;
 
-	policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-	cpumask_copy(policy->related_cpus, cpu_possible_mask);
+	cpumask_copy(policy->cpus, cpu_possible_mask);
 
 	if (policy->cpu == 0)
 		register_pm_notifier(&tegra_cpu_pm_notifier);
diff --git a/arch/arm/mach-tegra/tegra2_emc.c b/arch/arm/mach-tegra/tegra2_emc.c
index e18aa2f83ebf..ce7ce42a1ac9 100644
--- a/arch/arm/mach-tegra/tegra2_emc.c
+++ b/arch/arm/mach-tegra/tegra2_emc.c
@@ -312,11 +312,9 @@ static int tegra_emc_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
-	emc_regbase = devm_request_and_ioremap(&pdev->dev, res);
-	if (!emc_regbase) {
-		dev_err(&pdev->dev, "failed to remap registers\n");
-		return -ENOMEM;
-	}
+	emc_regbase = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(emc_regbase))
+		return PTR_ERR(emc_regbase);
 
 	pdata = pdev->dev.platform_data;
 
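
The devm_request_and_ioremap() conversions in this series all follow the same shape: devm_ioremap_resource() returns an ERR_PTR() and logs its own failure message, so callers shrink to an IS_ERR()/PTR_ERR() pair and propagate a more precise error code than a blanket -ENOMEM. A hedged sketch of the idiom in a generic probe function:

	/* Sketch of the error-handling idiom these drivers convert to. */
	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);	/* e.g. -EBUSY or -ENOMEM */

		/* ... use base; unmapping is handled by devres ... */
		return 0;
	}
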
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 5dea90636d94..3e5bbd0e5b23 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -11,6 +11,7 @@ config UX500_SOC_COMMON
 	select COMMON_CLK
 	select PINCTRL
 	select PINCTRL_NOMADIK
+	select PINCTRL_ABX500
 	select PL310_ERRATA_753970 if CACHE_PL310
 
 config UX500_SOC_DB8500
@@ -18,6 +19,11 @@ config UX500_SOC_DB8500
 	select CPU_FREQ_TABLE if CPU_FREQ
 	select MFD_DB8500_PRCMU
 	select PINCTRL_DB8500
+	select PINCTRL_DB8540
+	select PINCTRL_AB8500
+	select PINCTRL_AB8505
+	select PINCTRL_AB9540
+	select PINCTRL_AB8540
 	select REGULATOR
 	select REGULATOR_DB8500_PRCMU
 
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index d453522edb0d..b8781caa54b8 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -90,26 +90,8 @@ static struct platform_device snowball_gpio_en_3v3_regulator_dev = {
 	},
 };
 
-static struct ab8500_gpio_platform_data ab8500_gpio_pdata = {
+static struct abx500_gpio_platform_data ab8500_gpio_pdata = {
 	.gpio_base		= MOP500_AB8500_PIN_GPIO(1),
-	.irq_base		= MOP500_AB8500_VIR_GPIO_IRQ_BASE,
-	/* config_reg is the initial configuration of ab8500 pins.
-	 * The pins can be configured as GPIO or alt functions based
-	 * on value present in GpioSel1 to GpioSel6 and AlternatFunction
-	 * register. This is the array of 7 configuration settings.
-	 * One has to compile time decide these settings. Below is the
-	 * explanation of these setting
-	 * GpioSel1 = 0x00 => Pins GPIO1 to GPIO8 are not used as GPIO
-	 * GpioSel2 = 0x1E => Pins GPIO10 to GPIO13 are configured as GPIO
-	 * GpioSel3 = 0x80 => Pin GPIO24 is configured as GPIO
-	 * GpioSel4 = 0x01 => Pin GPIo25 is configured as GPIO
-	 * GpioSel5 = 0x7A => Pins GPIO34, GPIO36 to GPIO39 are conf as GPIO
-	 * GpioSel6 = 0x00 => Pins GPIO41 & GPIo42 are not configured as GPIO
-	 * AlternaFunction = 0x00 => If Pins GPIO10 to 13 are not configured
-	 * as GPIO then this register selectes the alternate fucntions
-	 */
-	.config_reg		= {0x00, 0x1E, 0x80, 0x01,
-					0x7A, 0x00, 0x00},
 };
 
 /* ab8500-codec */
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 5b286e06474c..b80ad9610e97 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -285,7 +285,7 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
 	OF_DEV_AUXDATA("st,nomadik-i2c", 0x80110000, "nmk-i2c.3", NULL),
 	OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL),
 	/* Requires device name bindings. */
-	OF_DEV_AUXDATA("stericsson,nmk_pinctrl", U8500_PRCMU_BASE,
+	OF_DEV_AUXDATA("stericsson,nmk-pinctrl", U8500_PRCMU_BASE,
 		"pinctrl-db8500", NULL),
 	/* Requires clock name and DMA bindings. */
 	OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000,
diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
index 7d34c52798b5..d526dd8e87d3 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
@@ -38,15 +38,7 @@
 #define MOP500_STMPE1601_IRQ_END	\
 	MOP500_STMPE1601_IRQ(STMPE_NR_INTERNAL_IRQS)
 
-/* AB8500 virtual gpio IRQ */
-#define AB8500_VIR_GPIO_NR_IRQS	16
-
-#define MOP500_AB8500_VIR_GPIO_IRQ_BASE	\
-	MOP500_STMPE1601_IRQ_END
-#define MOP500_AB8500_VIR_GPIO_IRQ_END	\
-	(MOP500_AB8500_VIR_GPIO_IRQ_BASE + AB8500_VIR_GPIO_NR_IRQS)
-
-#define MOP500_NR_IRQS		MOP500_AB8500_VIR_GPIO_IRQ_END
+#define MOP500_NR_IRQS		MOP500_STMPE1601_IRQ_END
 
 #define MOP500_IRQ_END		MOP500_NR_IRQS
 
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 5d5929450366..a78827b70270 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -36,6 +36,7 @@
 #include <linux/gfp.h>
 #include <linux/clkdev.h>
 #include <linux/mtd/physmap.h>
+#include <linux/bitops.h>
 
 #include <asm/irq.h>
 #include <asm/hardware/arm_timer.h>
@@ -65,16 +66,28 @@
 #define VA_VIC_BASE		__io_address(VERSATILE_VIC_BASE)
 #define VA_SIC_BASE		__io_address(VERSATILE_SIC_BASE)
 
+/* These PIC IRQs are valid in each configuration */
+#define PIC_VALID_ALL	BIT(SIC_INT_KMI0) | BIT(SIC_INT_KMI1) | \
+			BIT(SIC_INT_SCI3) | BIT(SIC_INT_UART3) | \
+			BIT(SIC_INT_CLCD) | BIT(SIC_INT_TOUCH) | \
+			BIT(SIC_INT_KEYPAD) | BIT(SIC_INT_DoC) | \
+			BIT(SIC_INT_USB) | BIT(SIC_INT_PCI0) | \
+			BIT(SIC_INT_PCI1) | BIT(SIC_INT_PCI2) | \
+			BIT(SIC_INT_PCI3)
 #if 1
 #define IRQ_MMCI0A	IRQ_VICSOURCE22
 #define IRQ_AACI	IRQ_VICSOURCE24
 #define IRQ_ETH		IRQ_VICSOURCE25
 #define PIC_MASK	0xFFD00000
+#define PIC_VALID	PIC_VALID_ALL
 #else
 #define IRQ_MMCI0A	IRQ_SIC_MMCI0A
 #define IRQ_AACI	IRQ_SIC_AACI
 #define IRQ_ETH		IRQ_SIC_ETH
 #define PIC_MASK	0
+#define PIC_VALID	PIC_VALID_ALL | BIT(SIC_INT_MMCI0A) | \
+			BIT(SIC_INT_MMCI1A) | BIT(SIC_INT_AACI) | \
+			BIT(SIC_INT_ETH)
 #endif
 
 /* Lookup table for finding a DT node that represents the vic instance */
@@ -102,7 +115,7 @@ void __init versatile_init_irq(void)
 					VERSATILE_SIC_BASE);
 
 	fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START,
-		IRQ_VICSOURCE31, ~PIC_MASK, np);
+		IRQ_VICSOURCE31, PIC_VALID, np);
 
 	/*
 	 * Interrupts on secondary controller from 0 to 8 are routed to
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index 2f84f4094f13..e92e5e0705bc 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -23,6 +23,7 @@
 #include <linux/io.h>
 
 #include <mach/hardware.h>
+#include <mach/irqs.h>
 #include <asm/irq.h>
 #include <asm/mach/pci.h>
 
@@ -327,12 +328,12 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	int irq;
 
 	/* slot,  pin,	irq
-	 *  24     1     27
-	 *  25     1     28
-	 *  26     1     29
-	 *  27     1     30
+	 *  24     1     IRQ_SIC_PCI0
+	 *  25     1     IRQ_SIC_PCI1
+	 *  26     1     IRQ_SIC_PCI2
+	 *  27     1     IRQ_SIC_PCI3
 	 */
-	irq = 27 + ((slot - 24 + pin - 1) & 3);
+	irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
 
 	return irq;
 }
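
The swizzle arithmetic is unchanged; only the magic constant 27 becomes the symbolic IRQ_SIC_PCI0. As a worked example, slot 25 with pin 2 gives (25 - 24 + 2 - 1) & 3 = 2, i.e. IRQ_SIC_PCI2, and the & 3 wraps higher slot/pin combinations back onto the same four SIC PCI interrupts.
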
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 3fd629d5a513..025d17328730 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -629,8 +629,9 @@ config ARM_THUMBEE
 	  make use of it. Say N for code that can run on CPUs without ThumbEE.
 
 config ARM_VIRT_EXT
-	bool "Native support for the ARM Virtualization Extensions"
-	depends on MMU && CPU_V7
+	bool
+	depends on MMU
+	default y if CPU_V7
 	help
 	  Enable the kernel to make use of the ARM Virtualization
 	  Extensions to install hypervisors without run-time firmware
@@ -640,11 +641,6 @@ config ARM_VIRT_EXT
 	  use of this feature. Refer to Documentation/arm/Booting for
 	  details.
 
-	  It is safe to enable this option even if the kernel may not be
-	  booted in HYP mode, may not have support for the
-	  virtualization extensions, or may be booted with a
-	  non-compliant bootloader.
-
 config SWP_EMULATE
 	bool "Emulate SWP/SWPB instructions"
 	depends on !CPU_USE_DOMAINS && CPU_V7
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 8a9c4cb50a93..4e333fa2756f 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \
-				   mmap.o pgd.o mmu.o vmregion.o
+				   mmap.o pgd.o mmu.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index bc4a5e9ebb78..7a0511191f6b 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -34,6 +34,9 @@
  * The ASID is used to tag entries in the CPU caches and TLBs.
  * The context ID is used by debuggers and trace logic, and
  * should be unique within all running processes.
+ *
+ * In big endian operation, the two 32 bit words are swapped if accessed
+ * by non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 #define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 076c26d43864..dda3904dc64c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	if (is_coherent || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
-	else if (gfp & GFP_ATOMIC)
+	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
 	else if (!IS_ENABLED(CONFIG_CMA))
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
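
This one-liner is a bug fix rather than a cleanup: GFP_ATOMIC is defined as __GFP_HIGH ("may use emergency reserves") and carries no sleep-related bit, so gfp & GFP_ATOMIC misclassifies non-sleeping allocations that do not set __GFP_HIGH. The reliable test for atomic context is the absence of __GFP_WAIT. A standalone sketch, with flag values simplified from 3.x-era headers:

	/* Sketch: why testing gfp & GFP_ATOMIC was wrong (simplified flags). */
	#include <stdio.h>

	#define __GFP_WAIT	0x10u	/* allocation may sleep */
	#define __GFP_HIGH	0x20u	/* may dip into reserves */
	#define GFP_ATOMIC	__GFP_HIGH
	#define GFP_NOWAIT	0x0u	/* atomic, but no reserve access */

	int main(void)
	{
		unsigned int gfp = GFP_NOWAIT;

		/* old test: 0, so a GFP_NOWAIT caller took the sleeping path */
		printf("old: %u\n", gfp & GFP_ATOMIC);
		/* new test: 1, every allocation lacking __GFP_WAIT is atomic */
		printf("new: %d\n", !(gfp & __GFP_WAIT));
		return 0;
	}
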
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 99db769307ec..2dffc010cc41 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -1,4 +1,6 @@
+#include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 
 #include <asm/cputype.h>
 #include <asm/idmap.h>
@@ -6,6 +8,7 @@
 #include <asm/pgtable.h>
 #include <asm/sections.h>
 #include <asm/system_info.h>
+#include <asm/virt.h>
 
 pgd_t *idmap_pgd;
 
@@ -59,11 +62,17 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 	} while (pud++, addr = next, addr != end);
 }
 
-static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void identity_mapping_add(pgd_t *pgd, const char *text_start,
+				 const char *text_end, unsigned long prot)
 {
-	unsigned long prot, next;
+	unsigned long addr, end;
+	unsigned long next;
+
+	addr = virt_to_phys(text_start);
+	end = virt_to_phys(text_end);
+
+	prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
 
-	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
 	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
 		prot |= PMD_BIT4;
 
@@ -74,28 +83,52 @@ static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long e
 	} while (pgd++, addr = next, addr != end);
 }
 
+#if defined(CONFIG_ARM_VIRT_EXT) && defined(CONFIG_ARM_LPAE)
+pgd_t *hyp_pgd;
+
+extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
+
+static int __init init_static_idmap_hyp(void)
+{
+	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	if (!hyp_pgd)
+		return -ENOMEM;
+
+	pr_info("Setting up static HYP identity map for 0x%p - 0x%p\n",
+		__hyp_idmap_text_start, __hyp_idmap_text_end);
+	identity_mapping_add(hyp_pgd, __hyp_idmap_text_start,
+			     __hyp_idmap_text_end, PMD_SECT_AP1);
+
+	return 0;
+}
+#else
+static int __init init_static_idmap_hyp(void)
+{
+	return 0;
+}
+#endif
+
 extern char __idmap_text_start[], __idmap_text_end[];
 
 static int __init init_static_idmap(void)
 {
-	phys_addr_t idmap_start, idmap_end;
+	int ret;
 
 	idmap_pgd = pgd_alloc(&init_mm);
 	if (!idmap_pgd)
 		return -ENOMEM;
 
-	/* Add an identity mapping for the physical address of the section. */
-	idmap_start = virt_to_phys((void *)__idmap_text_start);
-	idmap_end = virt_to_phys((void *)__idmap_text_end);
+	pr_info("Setting up static identity map for 0x%p - 0x%p\n",
+		__idmap_text_start, __idmap_text_end);
+	identity_mapping_add(idmap_pgd, __idmap_text_start,
+			     __idmap_text_end, 0);
 
-	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
-		(long long)idmap_start, (long long)idmap_end);
-	identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
+	ret = init_static_idmap_hyp();
 
 	/* Flush L1 for the hardware to see this page table content */
 	flush_cache_louis();
 
-	return 0;
+	return ret;
 }
 early_initcall(init_static_idmap);
 
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 88fd86cf3d9a..04d9006eab1f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -39,6 +39,70 @@
 #include <asm/mach/pci.h>
 #include "mm.h"
 
+
+LIST_HEAD(static_vmlist);
+
+static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
+			size_t size, unsigned int mtype)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+			continue;
+
+		if (vm->phys_addr > paddr ||
+			paddr + size - 1 > vm->phys_addr + vm->size - 1)
+			continue;
+
+		return svm;
+	}
+
+	return NULL;
+}
+
+struct static_vm *find_static_vm_vaddr(void *vaddr)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+
+		/* static_vmlist is ascending order */
+		if (vm->addr > vaddr)
+			break;
+
+		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
+			return svm;
+	}
+
+	return NULL;
+}
+
+void __init add_static_vm_early(struct static_vm *svm)
+{
+	struct static_vm *curr_svm;
+	struct vm_struct *vm;
+	void *vaddr;
+
+	vm = &svm->vm;
+	vm_area_add_early(vm);
+	vaddr = vm->addr;
+
+	list_for_each_entry(curr_svm, &static_vmlist, list) {
+		vm = &curr_svm->vm;
+
+		if (vm->addr > vaddr)
+			break;
+	}
+	list_add_tail(&svm->list, &curr_svm->list);
+}
+
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {
@@ -197,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	const struct mem_type *type;
 	int err;
 	unsigned long addr;
-	struct vm_struct * area;
+	struct vm_struct *area;
+	phys_addr_t paddr = __pfn_to_phys(pfn);
 
 #ifndef CONFIG_ARM_LPAE
 	/*
 	 * High mappings must be supersection aligned
 	 */
-	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
 		return NULL;
 #endif
 
@@ -219,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Try to reuse one of the static mapping whenever possible.
 	 */
-	read_lock(&vmlist_lock);
-	for (area = vmlist; area; area = area->next) {
-		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
-			break;
-		if (!(area->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
-			continue;
-		if (__phys_to_pfn(area->phys_addr) > pfn ||
-		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
-			continue;
-		/* we can drop the lock here as we know *area is static */
-		read_unlock(&vmlist_lock);
-		addr = (unsigned long)area->addr;
-		addr += __pfn_to_phys(pfn) - area->phys_addr;
-		return (void __iomem *) (offset + addr);
+	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
+		struct static_vm *svm;
+
+		svm = find_static_vm_paddr(paddr, size, mtype);
+		if (svm) {
+			addr = (unsigned long)svm->vm.addr;
+			addr += paddr - svm->vm.phys_addr;
+			return (void __iomem *) (offset + addr);
+		}
 	}
-	read_unlock(&vmlist_lock);
 
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
@@ -248,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
-	area->phys_addr = __pfn_to_phys(pfn);
+	area->phys_addr = paddr;
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) && pfn >= 0x100000 &&
-	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_supersections(addr, pfn, size, type);
-	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+		err = ioremap_page_range(addr, addr + size, paddr,
 					 __pgprot(type->prot_pte));
 
 	if (err) {
@@ -346,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	/* If this is a static mapping, we must leave it alone */
+	svm = find_static_vm_vaddr(addr);
+	if (svm)
+		return;
 
-	read_lock(&vmlist_lock);
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (vm->addr > addr)
-			break;
-		if (!(vm->flags & VM_IOREMAP))
-			continue;
-		/* If this is a static mapping we must leave it alone */
-		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
-		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
-			read_unlock(&vmlist_lock);
-			return;
-		}
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+	{
+		struct vm_struct *vm;
+
+		vm = find_vm_area(addr);
+
 		/*
 		 * If this is a section based mapping we need to handle it
 		 * specially as the VM subsystem does not know how to handle
 		 * such a beast.
 		 */
-		if ((vm->addr == addr) &&
-		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
 			unmap_area_sections((unsigned long)vm->addr, vm->size);
-			break;
-		}
-#endif
 	}
-	read_unlock(&vmlist_lock);
+#endif
 
 	vunmap(addr);
 }
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a8ee92da3544..d5a4e9ad8f0f 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -1,4 +1,6 @@
 #ifdef CONFIG_MMU
+#include <linux/list.h>
+#include <linux/vmalloc.h>
 
 /* the upper-most page table pointer */
 extern pmd_t *top_pmd;
@@ -65,6 +67,16 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* consistent regions used by dma_alloc_attrs() */
 #define VM_ARM_DMA_CONSISTENT	0x20000000
 
+
+struct static_vm {
+	struct vm_struct vm;
+	struct list_head list;
+};
+
+extern struct list_head static_vmlist;
+extern struct static_vm *find_static_vm_vaddr(void *vaddr);
+extern __init void add_static_vm_early(struct static_vm *svm);
+
 #endif
 
 #ifdef CONFIG_ZONE_DMA
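
Taken together, the ioremap.c and mm.h hunks replace open-coded walks of the VM subsystem's global vmlist with a private, boot-time-ordered list of static mappings, so lookups no longer need vmlist_lock at all. A hedged sketch of how a caller reuses a static mapping through the new helpers (error handling trimmed; find_static_vm_paddr() is file-local to ioremap.c in the patch):

	/* Sketch: reusing a static mapping via the helpers added above. */
	static void __iomem *try_reuse_static(phys_addr_t paddr, size_t size,
					      unsigned int mtype)
	{
		struct static_vm *svm = find_static_vm_paddr(paddr, size, mtype);

		if (!svm)
			return NULL;	/* caller creates a fresh mapping */

		/* offset of the requested region inside the static mapping */
		return (void __iomem *)((unsigned long)svm->vm.addr +
					(paddr - svm->vm.phys_addr));
	}
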
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ce328c7f5c94..e95a996ab78f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -57,6 +57,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
 static unsigned int ecc_mask __initdata = 0;
 pgprot_t pgprot_user;
 pgprot_t pgprot_kernel;
+pgprot_t pgprot_hyp_device;
+pgprot_t pgprot_s2;
+pgprot_t pgprot_s2_device;
 
 EXPORT_SYMBOL(pgprot_user);
 EXPORT_SYMBOL(pgprot_kernel);
@@ -66,34 +69,46 @@ struct cachepolicy {
 	unsigned int	cr_mask;
 	pmdval_t	pmd;
 	pteval_t	pte;
+	pteval_t	pte_s2;
 };
 
+#ifdef CONFIG_ARM_LPAE
+#define s2_policy(policy)	policy
+#else
+#define s2_policy(policy)	0
+#endif
+
 static struct cachepolicy cache_policies[] __initdata = {
 	{
 		.policy		= "uncached",
 		.cr_mask	= CR_W|CR_C,
 		.pmd		= PMD_SECT_UNCACHED,
 		.pte		= L_PTE_MT_UNCACHED,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy		= "buffered",
 		.cr_mask	= CR_C,
 		.pmd		= PMD_SECT_BUFFERED,
 		.pte		= L_PTE_MT_BUFFERABLE,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy		= "writethrough",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WT,
 		.pte		= L_PTE_MT_WRITETHROUGH,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITETHROUGH),
 	}, {
 		.policy		= "writeback",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WB,
 		.pte		= L_PTE_MT_WRITEBACK,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}, {
 		.policy		= "writealloc",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WBWA,
 		.pte		= L_PTE_MT_WRITEALLOC,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}
 };
 
@@ -310,6 +325,7 @@ static void __init build_mem_type_table(void)
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
 	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -421,6 +437,8 @@ static void __init build_mem_type_table(void)
 	 */
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+	s2_pgprot = cp->pte_s2;
+	hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -444,6 +462,7 @@ static void __init build_mem_type_table(void)
 			user_pgprot |= L_PTE_SHARED;
 			kern_pgprot |= L_PTE_SHARED;
 			vecs_pgprot |= L_PTE_SHARED;
+			s2_pgprot |= L_PTE_SHARED;
 			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
 			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -498,6 +517,9 @@ static void __init build_mem_type_table(void)
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 				 L_PTE_DIRTY | kern_pgprot);
+	pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
+	pgprot_s2_device  = __pgprot(s2_device_pgprot);
+	pgprot_hyp_device  = __pgprot(hyp_device_pgprot);
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
@@ -757,21 +779,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 {
 	struct map_desc *md;
 	struct vm_struct *vm;
+	struct static_vm *svm;
 
 	if (!nr)
 		return;
 
-	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
 	for (md = io_desc; nr; md++, nr--) {
 		create_mapping(md);
+
+		vm = &svm->vm;
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
-		vm_area_add_early(vm++);
+		add_static_vm_early(svm++);
 	}
 }
 
@@ -779,13 +804,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 			      void *caller)
 {
 	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
 
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm = &svm->vm;
 	vm->addr = (void *)addr;
 	vm->size = size;
 	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
 	vm->caller = caller;
-	vm_area_add_early(vm);
+	add_static_vm_early(svm);
 }
 
791#ifndef CONFIG_ARM_LPAE 819#ifndef CONFIG_ARM_LPAE
@@ -810,14 +838,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
 
 static void __init fill_pmd_gaps(void)
 {
+	struct static_vm *svm;
 	struct vm_struct *vm;
 	unsigned long addr, next = 0;
 	pmd_t *pmd;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
-			continue;
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
 		addr = (unsigned long)vm->addr;
 		if (addr < next)
 			continue;
@@ -857,19 +884,12 @@ static void __init fill_pmd_gaps(void)
 #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
 static void __init pci_reserve_io(void)
 {
-	struct vm_struct *vm;
-	unsigned long addr;
+	struct static_vm *svm;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		addr = (unsigned long)vm->addr;
-		addr &= ~(SZ_2M - 1);
-		if (addr == PCI_IO_VIRT_BASE)
-			return;
+	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
+	if (svm)
+		return;
 
-	}
 	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
 }
 #else
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index eb6aa73bc8b7..f9a0aa725ea9 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -38,9 +38,14 @@
 
 /*
  * mmid - get context id from mm pointer (mm->context.id)
+ * note, this field is 64bit, so in big-endian the two words are swapped too.
  */
 	.macro	mmid, rd, rn
+#ifdef __ARMEB__
+	ldr	\rd, [\rn, #MM_CONTEXT_ID + 4 ]
+#else
 	ldr	\rd, [\rn, #MM_CONTEXT_ID]
+#endif
 	.endm
 
 /*
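
The +4 offset exists because context.id is a 64-bit field: on a big-endian ARM the most significant word sits at the lower address, so a 32-bit load of the low word (where the ASID lives) must read from offset 4. The same idea expressed in C:

	/* Sketch: why big-endian needs MM_CONTEXT_ID + 4 for a 32-bit load. */
	#include <stdint.h>

	union ctx {
		uint64_t id;	/* mm->context.id */
		uint32_t w[2];
	};

	static uint32_t low_word(const union ctx *c)
	{
	#ifdef __ARMEB__	/* big-endian: LSW is the second word */
		return c->w[1];
	#else			/* little-endian: LSW is the first word */
		return c->w[0];
	#endif
	}
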
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 09c5233f4dfc..bcaaa8de9325 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -101,7 +101,7 @@ ENTRY(cpu_v6_dcache_clean_area)
 ENTRY(cpu_v6_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 6d98c13ab827..78f520bc0e99 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -40,7 +40,7 @@
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_ARM_ERRATA_430973
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 7b56386f9496..50bf1dafc9ea 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -47,7 +47,7 @@
  */
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	and	r3, r1, #0xff
 	mov	r3, r3, lsl #(48 - 32)		@ ASID
 	mcrr	p15, 0, r0, r3, c2		@ set TTB 0
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
deleted file mode 100644
index a631016e1f8f..000000000000
--- a/arch/arm/mm/vmregion.c
+++ /dev/null
@@ -1,205 +0,0 @@
-#include <linux/fs.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-
-#include "vmregion.h"
-
-/*
- * VM region handling support.
- *
- * This should become something generic, handling VM region allocations for
- * vmalloc and similar (ioremap, module space, etc).
- *
- * I envisage vmalloc()'s supporting vm_struct becoming:
- *
- *  struct vm_struct {
- *    struct vmregion	region;
- *    unsigned long	flags;
- *    struct page	**pages;
- *    unsigned int	nr_pages;
- *    unsigned long	phys_addr;
- *  };
- *
- * get_vm_area() would then call vmregion_alloc with an appropriate
- * struct vmregion head (eg):
- *
- *  struct vmregion vmalloc_head = {
- *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
- *	.vm_start	= VMALLOC_START,
- *	.vm_end		= VMALLOC_END,
- *  };
- *
- * However, vmalloc_head.vm_start is variable (typically, it is dependent on
- * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
- * would have to initialise this each time prior to calling vmregion_alloc().
- */
-
-struct arm_vmregion *
-arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
-		   size_t size, gfp_t gfp, const void *caller)
-{
-	unsigned long start = head->vm_start, addr = head->vm_end;
-	unsigned long flags;
-	struct arm_vmregion *c, *new;
-
-	if (head->vm_end - head->vm_start < size) {
-		printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
-			__func__, size);
-		goto out;
-	}
-
-	new = kmalloc(sizeof(struct arm_vmregion), gfp);
-	if (!new)
-		goto out;
-
-	new->caller = caller;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-
-	addr = rounddown(addr - size, align);
-	list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
-		if (addr >= c->vm_end)
-			goto found;
-		addr = rounddown(c->vm_start - size, align);
-		if (addr < start)
-			goto nospc;
-	}
-
- found:
-	/*
-	 * Insert this entry after the one we found.
-	 */
-	list_add(&new->vm_list, &c->vm_list);
-	new->vm_start = addr;
-	new->vm_end = addr + size;
-	new->vm_active = 1;
-
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	return new;
-
- nospc:
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	kfree(new);
- out:
-	return NULL;
-}
-
-static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
-{
-	struct arm_vmregion *c;
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if (c->vm_active && c->vm_start == addr)
-			goto out;
-	}
-	c = NULL;
- out:
-	return c;
-}
-
-struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
-{
-	struct arm_vmregion *c;
-	unsigned long flags;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-	c = __arm_vmregion_find(head, addr);
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	return c;
-}
-
-struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
-{
-	struct arm_vmregion *c;
-	unsigned long flags;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-	c = __arm_vmregion_find(head, addr);
-	if (c)
-		c->vm_active = 0;
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	return c;
-}
-
-void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-	list_del(&c->vm_list);
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-
-	kfree(c);
-}
-
-#ifdef CONFIG_PROC_FS
-static int arm_vmregion_show(struct seq_file *m, void *p)
-{
-	struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);
-
-	seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
-		c->vm_end - c->vm_start);
-	if (c->caller)
-		seq_printf(m, " %pS", (void *)c->caller);
-	seq_putc(m, '\n');
-	return 0;
-}
-
-static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
-{
-	struct arm_vmregion_head *h = m->private;
-	spin_lock_irq(&h->vm_lock);
-	return seq_list_start(&h->vm_list, *pos);
-}
-
-static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
-{
-	struct arm_vmregion_head *h = m->private;
-	return seq_list_next(p, &h->vm_list, pos);
-}
-
-static void arm_vmregion_stop(struct seq_file *m, void *p)
-{
-	struct arm_vmregion_head *h = m->private;
-	spin_unlock_irq(&h->vm_lock);
-}
-
-static const struct seq_operations arm_vmregion_ops = {
-	.start	= arm_vmregion_start,
-	.stop	= arm_vmregion_stop,
-	.next	= arm_vmregion_next,
-	.show	= arm_vmregion_show,
-};
-
-static int arm_vmregion_open(struct inode *inode, struct file *file)
-{
-	struct arm_vmregion_head *h = PDE(inode)->data;
-	int ret = seq_open(file, &arm_vmregion_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = h;
-	}
-	return ret;
-}
-
-static const struct file_operations arm_vmregion_fops = {
-	.open	= arm_vmregion_open,
-	.read	= seq_read,
-	.llseek	= seq_lseek,
-	.release = seq_release,
-};
-
-int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
-{
-	proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
-	return 0;
-}
-#else
-int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
-{
-	return 0;
-}
-#endif
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
deleted file mode 100644
index 0f5a5f2a2c7b..000000000000
--- a/arch/arm/mm/vmregion.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef VMREGION_H
-#define VMREGION_H
-
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-struct page;
-
-struct arm_vmregion_head {
-	spinlock_t	vm_lock;
-	struct list_head vm_list;
-	unsigned long	vm_start;
-	unsigned long	vm_end;
-};
-
-struct arm_vmregion {
-	struct list_head vm_list;
-	unsigned long	vm_start;
-	unsigned long	vm_end;
-	int		vm_active;
-	const void	*caller;
-};
-
-struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
-struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
-struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
-void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
-
-int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
-
-#endif
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index a34f1e214116..6828ef6ce80e 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -341,10 +341,17 @@ static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
 
 static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
 {
-	emit(ARM_LSL_R(ARM_R1, r_src, 8), ctx);
-	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSL, 8), ctx);
-	emit(ARM_LSL_I(r_dst, r_dst, 8), ctx);
-	emit(ARM_LSL_R(r_dst, r_dst, 8), ctx);
+	/* r_dst = (r_src << 8) | (r_src >> 8) */
+	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
+	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);
+
+	/*
+	 * we need to mask out the bits set in r_dst[23:16] due to
+	 * the first shift instruction.
+	 *
+	 * note that 0x8ff is the encoded immediate 0x00ff0000.
+	 */
+	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
 }
 
 #else /* ARMv6+ */
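
The corrected sequence is a halfword byteswap in two data-processing instructions plus a mask: the left shift leaves stale bits in [23:16], which the BIC clears (0x8ff being the rotated-immediate encoding of 0x00ff0000). The C equivalent of what the JIT now emits, assuming the payload sits in bits [15:0] of the source:

	/* Sketch: C model of the fixed emit_swap16() instruction sequence. */
	#include <stdint.h>

	static uint32_t swap16(uint32_t src)
	{
		uint32_t dst = (src << 8) | (src >> 8);	/* LSL #8; ORR, LSR #8 */

		return dst & ~0x00ff0000u;	/* BIC: clear bits [23:16] */
	}
	/* swap16(0x1234) == 0x3412 */
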
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 7b433f3bddca..a0daa2fb5de6 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -808,11 +808,9 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
-	timer->io_base = devm_request_and_ioremap(dev, mem);
-	if (!timer->io_base) {
-		dev_err(dev, "%s: region already claimed.\n", __func__);
-		return -ENOMEM;
-	}
+	timer->io_base = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(timer->io_base))
+		return PTR_ERR(timer->io_base);
 
 	if (dev->of_node) {
 		if (of_find_property(dev->of_node, "ti,timer-alwon", NULL))
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index 2d676ab50f73..ca07cb1b155a 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -386,11 +386,9 @@ static int s3c_adc_probe(struct platform_device *pdev)
 		return -ENXIO;
 	}
 
-	adc->regs = devm_request_and_ioremap(dev, regs);
-	if (!adc->regs) {
-		dev_err(dev, "failed to map registers\n");
-		return -ENXIO;
-	}
+	adc->regs = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(adc->regs))
+		return PTR_ERR(adc->regs);
 
 	ret = regulator_enable(adc->vdd);
 	if (ret)
diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c
index d088afa034e8..71d58ddea9c1 100644
--- a/arch/arm/plat-samsung/dma-ops.c
+++ b/arch/arm/plat-samsung/dma-ops.c
@@ -19,7 +19,8 @@
 #include <mach/dma.h>
 
 static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
-				struct samsung_dma_req *param)
+				struct samsung_dma_req *param,
+				struct device *dev, char *ch_name)
 {
 	dma_cap_mask_t mask;
 	void *filter_param;
@@ -33,7 +34,12 @@ static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
 	 */
 	filter_param = (dma_ch == DMACH_DT_PROP) ?
 		(void *)param->dt_dmach_prop : (void *)dma_ch;
-	return (unsigned)dma_request_channel(mask, pl330_filter, filter_param);
+
+	if (dev->of_node)
+		return (unsigned)dma_request_slave_channel(dev, ch_name);
+	else
+		return (unsigned)dma_request_channel(mask, pl330_filter,
+							filter_param);
 }
 
 static int samsung_dmadev_release(unsigned ch, void *param)
diff --git a/arch/arm/plat-samsung/include/plat/adc.h b/arch/arm/plat-samsung/include/plat/adc.h
index b258a08de591..2fc89315553f 100644
--- a/arch/arm/plat-samsung/include/plat/adc.h
+++ b/arch/arm/plat-samsung/include/plat/adc.h
@@ -15,6 +15,7 @@
 #define __ASM_PLAT_ADC_H __FILE__
 
 struct s3c_adc_client;
+struct platform_device;
 
 extern int s3c_adc_start(struct s3c_adc_client *client,
 			unsigned int channel, unsigned int nr_samples);
diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
index f5144cdd3001..114178268b75 100644
--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
@@ -39,7 +39,8 @@ struct samsung_dma_config {
 };
 
 struct samsung_dma_ops {
-	unsigned (*request)(enum dma_ch ch, struct samsung_dma_req *param);
+	unsigned (*request)(enum dma_ch ch, struct samsung_dma_req *param,
+				struct device *dev, char *ch_name);
 	int (*release)(unsigned ch, void *param);
 	int (*config)(unsigned ch, struct samsung_dma_config *param);
 	int (*prepare)(unsigned ch, struct samsung_dma_prep *param);
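
Widening request() lets one backend serve both probe styles: device-tree users resolve the channel by name via dma_request_slave_channel(), while legacy boards keep the pl330 filter path. A caller-side sketch, where "tx" is a placeholder for whatever the device's dma-names property declares:

	/* Sketch: requesting a channel through the extended op. */
	static unsigned example_acquire_tx(struct device *dev,
					   struct samsung_dma_ops *ops,
					   struct samsung_dma_req *req)
	{
		/* the DT path uses dev + ch_name; the legacy path ignores them */
		return ops->request(DMACH_DT_PROP, req, dev, "tx");
	}
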
diff --git a/arch/arm/plat-samsung/s3c-dma-ops.c b/arch/arm/plat-samsung/s3c-dma-ops.c
index f99448c48d30..0cc40aea3f5a 100644
--- a/arch/arm/plat-samsung/s3c-dma-ops.c
+++ b/arch/arm/plat-samsung/s3c-dma-ops.c
@@ -36,7 +36,8 @@ static void s3c_dma_cb(struct s3c2410_dma_chan *channel, void *param,
 }
 
 static unsigned s3c_dma_request(enum dma_ch dma_ch,
-				struct samsung_dma_req *param)
+				struct samsung_dma_req *param,
+				struct device *dev, char *ch_name)
 {
 	struct cb_data *data;
 