author    Linus Torvalds <torvalds@linux-foundation.org>  2013-07-03 13:31:38 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-07-03 13:31:38 -0400
commit    1873e50028ce87dd9014049c86d71a898fa02166 (patch)
tree      046d37339278c3b88f0c248e9e6ff5fed804fe62 /arch
parent    fb2af0020a51709ad87ea8055c325d3fbde04158 (diff)
parent    aa729dccb5e8dfbc78e2e235b8754d6acccee731 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64
Pull ARM64 updates from Catalin Marinas:
"Main features:
- KVM and Xen ports to AArch64
- Hugetlbfs and transparent huge pages support for arm64
- Applied Micro X-Gene Kconfig entry and dts file
- Cache flushing improvements
  For arm64 huge page support, there are x86 changes that move part of
  arch/x86/mm/hugetlbpage.c into mm/hugetlb.c so it can be re-used by arm64"
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64: (66 commits)
arm64: Add initial DTS for APM X-Gene Storm SOC and APM Mustang board
arm64: Add defines for APM ARMv8 implementation
arm64: Enable APM X-Gene SOC family in the defconfig
arm64: Add Kconfig option for APM X-Gene SOC family
arm64/Makefile: provide vdso_install target
ARM64: mm: THP support.
ARM64: mm: Raise MAX_ORDER for 64KB pages and THP.
ARM64: mm: HugeTLB support.
ARM64: mm: Move PTE_PROT_NONE bit.
ARM64: mm: Make PAGE_NONE pages read only and no-execute.
ARM64: mm: Restore memblock limit when map_mem finished.
mm: thp: Correct the HPAGE_PMD_ORDER check.
x86: mm: Remove general hugetlb code from x86.
mm: hugetlb: Copy general hugetlb code from x86 to mm.
x86: mm: Remove x86 version of huge_pmd_share.
mm: hugetlb: Copy huge_pmd_share from x86 to mm.
arm64: KVM: document kernel object mappings in HYP
arm64: KVM: MAINTAINERS update
arm64: KVM: userspace API documentation
arm64: KVM: enable initialization of a 32bit vcpu
...
Diffstat (limited to 'arch')
68 files changed, 5297 insertions(+), 359 deletions(-)
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 30cdacb675af..359a7b50b158 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_ARM_XEN_PAGE_H
 #define _ASM_ARM_XEN_PAGE_H
 
-#include <asm/mach/map.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
@@ -88,6 +87,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 	return __set_phys_to_machine(pfn, mfn);
 }
 
-#define xen_remap(cookie, size) __arm_ioremap((cookie), (size), MT_MEMORY);
+#define xen_remap(cookie, size) ioremap_cached((cookie), (size));
 
 #endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/kvm/arch_timer.c b/arch/arm/kvm/arch_timer.c
index c55b6089e923..49a7516d81c7 100644
--- a/arch/arm/kvm/arch_timer.c
+++ b/arch/arm/kvm/arch_timer.c
@@ -195,6 +195,7 @@ static struct notifier_block kvm_timer_cpu_nb = {
 
 static const struct of_device_id arch_timer_of_match[] = {
 	{ .compatible = "arm,armv7-timer", },
+	{ .compatible = "arm,armv8-timer", },
 	{},
 };
 
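The new "arm,armv8-timer" entry lets the same KVM timer code probe from both ARMv7 and ARMv8 device trees. A minimal, self-contained sketch of how a compatible string is matched against such a null-terminated table (plain strcmp here; the real kernel does this in drivers/of against full device nodes):

#include <stdio.h>
#include <string.h>

struct of_device_id { const char *compatible; };

static const struct of_device_id arch_timer_of_match[] = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{ /* sentinel */ },
};

static int matches(const char *compat)
{
	const struct of_device_id *id;

	for (id = arch_timer_of_match; id->compatible; id++)
		if (!strcmp(id->compatible, compat))
			return 1;
	return 0;
}

int main(void)
{
	printf("armv8-timer matches: %d\n", matches("arm,armv8-timer"));
	return 0;
}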
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 56b3f6d447ae..4143d9b0d87a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -7,6 +7,7 @@ config ARM64
 	select ARM_AMBA
 	select ARM_ARCH_TIMER
 	select ARM_GIC
+	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS
 	select COMMON_CLK
 	select GENERIC_CLOCKEVENTS
@@ -111,6 +112,11 @@ config ARCH_VEXPRESS
 	  This enables support for the ARMv8 software model (Versatile
 	  Express).
 
+config ARCH_XGENE
+	bool "AppliedMicro X-Gene SOC Family"
+	help
+	  This enables support for AppliedMicro X-Gene SOC Family
+
 endmenu
 
 menu "Bus support"
@@ -148,6 +154,8 @@ config NR_CPUS
 	int "Maximum number of CPUs (2-32)"
 	range 2 32
 	depends on SMP
+	# These have to remain sorted largest to smallest
+	default "8" if ARCH_XGENE
 	default "4"
 
 source kernel/Kconfig.preempt
@@ -180,8 +188,35 @@ config HW_PERF_EVENTS
 	  Enable hardware performance counter support for perf events. If
 	  disabled, perf events will use software events only.
 
+config SYS_SUPPORTS_HUGETLBFS
+	def_bool y
+
+config ARCH_WANT_GENERAL_HUGETLB
+	def_bool y
+
+config ARCH_WANT_HUGE_PMD_SHARE
+	def_bool y if !ARM64_64K_PAGES
+
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	def_bool y
+
 source "mm/Kconfig"
 
+config XEN_DOM0
+	def_bool y
+	depends on XEN
+
+config XEN
+	bool "Xen guest support on ARM64 (EXPERIMENTAL)"
+	depends on ARM64 && OF
+	help
+	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
+
+config FORCE_MAX_ZONEORDER
+	int
+	default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
+	default "11"
+
 endmenu
 
 menu "Boot options"
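FORCE_MAX_ZONEORDER jumps to 14 here because a transparent huge page is one PMD-sized block: with a 64KB granule a PMD maps 512MB, i.e. 8192 pages, so the buddy allocator must serve order-13 allocations and MAX_ORDER must be at least 14. A small self-contained sketch of that arithmetic (shift values as in the arm64 64K-page layout):

#include <stdio.h>

int main(void)
{
	int page_shift = 16;	/* 64KB pages */
	int pmd_shift  = 29;	/* one PMD maps 512MB with a 64KB granule */
	int hpage_order = pmd_shift - page_shift;

	/* the buddy allocator serves orders 0 .. MAX_ORDER-1 */
	printf("THP order = %d, so MAX_ORDER >= %d\n",
	       hpage_order, hpage_order + 1);
	return 0;
}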
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index c95c5cb212fd..d90cf79f233a 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -37,6 +37,8 @@ TEXT_OFFSET := 0x00080000
 export TEXT_OFFSET GZFLAGS
 
 core-y += arch/arm64/kernel/ arch/arm64/mm/
+core-$(CONFIG_KVM) += arch/arm64/kvm/
+core-$(CONFIG_XEN) += arch/arm64/xen/
 libs-y := arch/arm64/lib/ $(libs-y)
 libs-y += $(LIBGCC)
 
@@ -60,6 +62,10 @@ zinstall install: vmlinux
 dtbs: scripts
 	$(Q)$(MAKE) $(build)=$(boot)/dts dtbs
 
+PHONY += vdso_install
+vdso_install:
+	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
+
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
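The new vdso_install target simply recurses into the vDSO object directory so the userspace vDSO image can be installed for debuggers. A typical invocation would be something like the following (the cross-compiler prefix is illustrative):

    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- vdso_install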
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 68457e9e0975..c52bdb051f66 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,4 +1,5 @@
 dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb
+dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb
 
 targets += dtbs
 targets += $(dtb-y)
diff --git a/arch/arm64/boot/dts/apm-mustang.dts b/arch/arm64/boot/dts/apm-mustang.dts
new file mode 100644
index 000000000000..1247ca1200b1
--- /dev/null
+++ b/arch/arm64/boot/dts/apm-mustang.dts
@@ -0,0 +1,26 @@
+/*
+ * dts file for AppliedMicro (APM) Mustang Board
+ *
+ * Copyright (C) 2013, Applied Micro Circuits Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+/dts-v1/;
+
+/include/ "apm-storm.dtsi"
+
+/ {
+	model = "APM X-Gene Mustang board";
+	compatible = "apm,mustang", "apm,xgene-storm";
+
+	chosen { };
+
+	memory {
+		device_type = "memory";
+		reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
+	};
+};
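With #address-cells = <2> and #size-cells = <2>, the memory reg above packs a 64-bit base and a 64-bit size into two 32-bit cells each, high word first. A self-contained sketch of the decoding, using the Mustang values < 0x1 0x00000000 0x0 0x80000000 >:

#include <stdio.h>
#include <stdint.h>

static uint64_t cells64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;	/* DT cells: high word first */
}

int main(void)
{
	uint64_t base = cells64(0x1, 0x00000000);	/* 4GB */
	uint64_t size = cells64(0x0, 0x80000000);	/* 2GB */

	printf("RAM: base 0x%llx, size 0x%llx (%llu MB)\n",
	       (unsigned long long)base, (unsigned long long)size,
	       (unsigned long long)(size >> 20));
	return 0;
}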
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
new file mode 100644
index 000000000000..bfdc57834929
--- /dev/null
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -0,0 +1,116 @@
+/*
+ * dts file for AppliedMicro (APM) X-Gene Storm SOC
+ *
+ * Copyright (C) 2013, Applied Micro Circuits Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+/ {
+	compatible = "apm,xgene-storm";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		cpu@000 {
+			device_type = "cpu";
+			compatible = "apm,potenza", "arm,armv8";
+			reg = <0x0 0x000>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0x1 0x0000fff8>;
+		};
+		cpu@001 {
+			device_type = "cpu";
+			compatible = "apm,potenza", "arm,armv8";
+			reg = <0x0 0x001>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0x1 0x0000fff8>;
+		};
+		cpu@100 {
+			device_type = "cpu";
+			compatible = "apm,potenza", "arm,armv8";
+			reg = <0x0 0x100>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0x1 0x0000fff8>;
+		};
+		cpu@101 {
+			device_type = "cpu";
+			compatible = "apm,potenza", "arm,armv8";
+			reg = <0x0 0x101>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0x1 0x0000fff8>;
+		};
+		cpu@200 {
+			device_type = "cpu";
+			compatible = "apm,potenza", "arm,armv8";
+			reg = <0x0 0x200>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0x1 0x0000fff8>;
+		};
+		cpu@201 {
+			device_type = "cpu";
+			compatible = "apm,potenza", "arm,armv8";
+			reg = <0x0 0x201>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0x1 0x0000fff8>;
+		};
+		cpu@300 {
+			device_type = "cpu";
+			compatible = "apm,potenza", "arm,armv8";
+			reg = <0x0 0x300>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0x1 0x0000fff8>;
+		};
+		cpu@301 {
+			device_type = "cpu";
+			compatible = "apm,potenza", "arm,armv8";
+			reg = <0x0 0x301>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0x1 0x0000fff8>;
+		};
+	};
+
+	gic: interrupt-controller@78010000 {
+		compatible = "arm,cortex-a15-gic";
+		#interrupt-cells = <3>;
+		interrupt-controller;
+		reg = <0x0 0x78010000 0x0 0x1000>,	/* GIC Dist */
+		      <0x0 0x78020000 0x0 0x1000>,	/* GIC CPU */
+		      <0x0 0x78040000 0x0 0x2000>,	/* GIC VCPU Control */
+		      <0x0 0x78060000 0x0 0x2000>;	/* GIC VCPU */
+		interrupts = <1 9 0xf04>;	/* GIC Maintenence IRQ */
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <1 0 0xff01>,	/* Secure Phys IRQ */
+			     <1 13 0xff01>,	/* Non-secure Phys IRQ */
+			     <1 14 0xff01>,	/* Virt IRQ */
+			     <1 15 0xff01>;	/* Hyp IRQ */
+		clock-frequency = <50000000>;
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		serial0: serial@1c020000 {
+			device_type = "serial";
+			compatible = "ns16550";
+			reg = <0 0x1c020000 0x0 0x1000>;
+			reg-shift = <2>;
+			clock-frequency = <10000000>;	/* Updated by bootloader */
+			interrupt-parent = <&gic>;
+			interrupts = <0x0 0x4c 0x4>;
+		};
+	};
+};
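The three-cell interrupt specifiers above follow the arm,cortex-a15-gic binding: cell 0 selects SPI (0) or PPI (1), cell 1 is the interrupt number within that space, and cell 2 carries trigger flags in the low nibble plus, for PPIs, a CPU mask in bits [15:8]. A small sketch decoding the timer's <1 13 0xff01> entry on that assumption:

#include <stdio.h>

int main(void)
{
	unsigned cells[3] = { 1, 13, 0xff01 };	/* non-secure phys timer PPI */

	printf("%s %u, CPU mask 0x%02x, trigger flags 0x%x\n",
	       cells[0] ? "PPI" : "SPI", cells[1],
	       (cells[2] >> 8) & 0xff, cells[2] & 0xf);
	return 0;
}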
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 8d9696adb440..5b3e83217b03 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -24,6 +24,7 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
 CONFIG_SMP=y
 CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_CMDLINE="console=ttyAMA0"
@@ -54,6 +55,9 @@ CONFIG_INPUT_EVDEV=y
 # CONFIG_SERIO_I8042 is not set
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 3300cbd18a89..fea9ee327206 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -123,9 +123,6 @@ static inline void __flush_icache_all(void)
 #define flush_dcache_mmap_unlock(mapping) \
 	spin_unlock_irq(&(mapping)->tree_lock)
 
-#define flush_icache_user_range(vma,page,addr,len) \
-	flush_dcache_page(page)
-
 /*
  * We don't appear to need to do anything here. In fact, if we did, we'd
  * duplicate cache flushing elsewhere performed by flush_dcache_page().
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index cf2749488cd4..5fe138e0b828 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -37,11 +37,14 @@
 })
 
 #define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_APM 0x50
 
 #define ARM_CPU_PART_AEM_V8 0xD0F0
 #define ARM_CPU_PART_FOUNDATION 0xD000
 #define ARM_CPU_PART_CORTEX_A57 0xD070
 
+#define APM_CPU_PART_POTENZA 0x0000
+
 #ifndef __ASSEMBLY__
 
 /*
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 7eaa0b302493..ef8235c68c09 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -83,6 +83,15 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
 }
 #endif
 
+#ifdef CONFIG_COMPAT
+int aarch32_break_handler(struct pt_regs *regs);
+#else
+static int aarch32_break_handler(struct pt_regs *regs)
+{
+	return -EFAULT;
+}
+#endif
+
 #endif /* __ASSEMBLY */
 #endif /* __KERNEL__ */
 #endif /* __ASM_DEBUG_MONITORS_H */
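This is the usual config-stub pattern: when the feature is compiled out, a static stub returns an error so callers need no #ifdefs of their own. A self-contained sketch of the pattern with illustrative names (not the kernel's declarations):

#include <stdio.h>
#include <errno.h>

/* #define CONFIG_COMPAT */	/* toggle to exercise both paths */

#ifdef CONFIG_COMPAT
int break_handler(void) { return 0; }	/* real handler would live elsewhere */
#else
static inline int break_handler(void) { return -EFAULT; }
#endif

int main(void)
{
	printf("handler returned %d\n", break_handler());
	return 0;
}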
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 0d8453c755a8..cf98b362094b 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -18,6 +18,9 @@
 
 struct dev_archdata {
 	struct dma_map_ops *dma_ops;
+#ifdef CONFIG_IOMMU_API
+	void *iommu;	/* private IOMMU data */
+#endif
 };
 
 struct pdev_archdata {
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 994776894198..8d1810001aef 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -81,8 +81,12 @@ static inline void dma_mark_clean(void *addr, size_t size)
 {
 }
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flags)
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+#define dma_free_coherent(d, s, h, f) dma_free_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flags,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *vaddr;
@@ -90,13 +94,14 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
 	if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
 		return vaddr;
 
-	vaddr = ops->alloc(dev, size, dma_handle, flags, NULL);
+	vaddr = ops->alloc(dev, size, dma_handle, flags, attrs);
 	debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr);
 	return vaddr;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dev_addr)
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t dev_addr,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -104,7 +109,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 		return;
 
 	debug_dma_free_coherent(dev, size, vaddr, dev_addr);
-	ops->free(dev, size, vaddr, dev_addr, NULL);
+	ops->free(dev, size, vaddr, dev_addr, attrs);
 }
 
 /*
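The old dma_alloc_coherent()/dma_free_coherent() entry points become thin macros over the new *_attrs variants with a NULL attrs argument, so existing callers keep working while attribute-aware callers gain a parameter. A self-contained sketch of that forwarding pattern (the names are stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

static void *alloc_attrs(size_t size, const char *attrs)
{
	printf("alloc %zu bytes, attrs=%s\n", size, attrs ? attrs : "none");
	return malloc(size);
}

/* legacy entry point preserved as a macro forwarding NULL attrs */
#define alloc_coherent(size) alloc_attrs((size), NULL)

int main(void)
{
	void *p = alloc_coherent(64);	/* behaves exactly as before */
	void *q = alloc_attrs(64, "write-combine");

	free(p);
	free(q);
	return 0;
}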
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
new file mode 100644
index 000000000000..5b7ca8ace95f
--- /dev/null
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -0,0 +1,117 @@
+/*
+ * arch/arm64/include/asm/hugetlb.h
+ *
+ * Copyright (C) 2013 Linaro Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_HUGETLB_H
+#define __ASM_HUGETLB_H
+
+#include <asm-generic/hugetlb.h>
+#include <asm/page.h>
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+				   pte_t *ptep, pte_t pte)
+{
+	set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+	ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+					    unsigned long addr, pte_t *ptep)
+{
+	return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+					  unsigned long addr, unsigned long end,
+					  unsigned long floor,
+					  unsigned long ceiling)
+{
+	free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+					 unsigned long addr, unsigned long len)
+{
+	return 0;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+					 unsigned long addr, unsigned long len)
+{
+	struct hstate *h = hstate_file(file);
+	if (len & ~huge_page_mask(h))
+		return -EINVAL;
+	if (addr & ~huge_page_mask(h))
+		return -EINVAL;
+	return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+	return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+	clear_bit(PG_dcache_clean, &page->flags);
+}
+
+#endif /* __ASM_HUGETLB_H */
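Most hooks in this header simply forward to the standard pte helpers, which is what lets CONFIG_ARCH_WANT_GENERAL_HUGETLB reuse the generic code moved into mm/hugetlb.c earlier in this series. The only real logic is the alignment check in prepare_hugepage_range(); a self-contained sketch of that check, assuming a 2MB huge page (the PMD size with a 4KB granule):

#include <stdio.h>

#define HPAGE_SIZE (2UL << 20)		/* 2MB: PMD size with 4KB pages */
#define HPAGE_MASK (~(HPAGE_SIZE - 1))

static int prepare_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -1;	/* length not a multiple of the huge page size */
	if (addr & ~HPAGE_MASK)
		return -1;	/* start not huge-page aligned */
	return 0;
}

int main(void)
{
	printf("aligned:   %d\n", prepare_range(0x40000000UL, HPAGE_SIZE));
	printf("unaligned: %d\n", prepare_range(0x40001000UL, HPAGE_SIZE));
	return 0;
}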
diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h
new file mode 100644
index 000000000000..d2c79049ff11
--- /dev/null
+++ b/arch/arm64/include/asm/hypervisor.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_ARM64_HYPERVISOR_H
+#define _ASM_ARM64_HYPERVISOR_H
+
+#include <asm/xen/hypervisor.h>
+
+#endif
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 2e12258aa7e4..1d12f89140ba 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -228,10 +228,12 @@ extern void __iounmap(volatile void __iomem *addr);
 #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
 #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
 
 #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
+#define ioremap_cached(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL))
 #define iounmap __iounmap
 
 #define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
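All of the ioremap variants are one primitive parameterized by memory attributes; the new ioremap_cached() with PROT_NORMAL is what the xen_remap() change earlier in this diff relies on. A hedged, self-contained sketch of the dispatch pattern (stand-in names, not the kernel implementation):

#include <stdio.h>

enum mem_attr { DEVICE_nGnRE, NORMAL_NC, NORMAL };

static const char *remap(unsigned long addr, unsigned long size,
			 enum mem_attr attr)
{
	/* stand-in for __ioremap(addr, size, __pgprot(...)) */
	static const char * const names[] = {
		"Device-nGnRE", "Normal non-cacheable", "Normal cacheable",
	};
	(void)addr; (void)size;
	return names[attr];
}

#define ioremap(a, s)		remap((a), (s), DEVICE_nGnRE)
#define ioremap_wc(a, s)	remap((a), (s), NORMAL_NC)
#define ioremap_cached(a, s)	remap((a), (s), NORMAL)

int main(void)
{
	printf("ioremap_cached -> %s\n", ioremap_cached(0x1c020000UL, 0x1000UL));
	return 0;
}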
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
new file mode 100644
index 000000000000..a5f28e2720c7
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_ARM_H__
+#define __ARM64_KVM_ARM_H__
+
+#include <asm/types.h>
+
+/* Hyp Configuration Register (HCR) bits */
+#define HCR_ID (UL(1) << 33)
+#define HCR_CD (UL(1) << 32)
+#define HCR_RW_SHIFT 31
+#define HCR_RW (UL(1) << HCR_RW_SHIFT)
+#define HCR_TRVM (UL(1) << 30)
+#define HCR_HCD (UL(1) << 29)
+#define HCR_TDZ (UL(1) << 28)
+#define HCR_TGE (UL(1) << 27)
+#define HCR_TVM (UL(1) << 26)
+#define HCR_TTLB (UL(1) << 25)
+#define HCR_TPU (UL(1) << 24)
+#define HCR_TPC (UL(1) << 23)
+#define HCR_TSW (UL(1) << 22)
+#define HCR_TAC (UL(1) << 21)
+#define HCR_TIDCP (UL(1) << 20)
+#define HCR_TSC (UL(1) << 19)
+#define HCR_TID3 (UL(1) << 18)
+#define HCR_TID2 (UL(1) << 17)
+#define HCR_TID1 (UL(1) << 16)
+#define HCR_TID0 (UL(1) << 15)
+#define HCR_TWE (UL(1) << 14)
+#define HCR_TWI (UL(1) << 13)
+#define HCR_DC (UL(1) << 12)
+#define HCR_BSU (3 << 10)
+#define HCR_BSU_IS (UL(1) << 10)
+#define HCR_FB (UL(1) << 9)
+#define HCR_VA (UL(1) << 8)
+#define HCR_VI (UL(1) << 7)
+#define HCR_VF (UL(1) << 6)
+#define HCR_AMO (UL(1) << 5)
+#define HCR_IMO (UL(1) << 4)
+#define HCR_FMO (UL(1) << 3)
+#define HCR_PTW (UL(1) << 2)
+#define HCR_SWIO (UL(1) << 1)
+#define HCR_VM (UL(1) << 0)
+
+/*
+ * The bits we set in HCR:
+ * RW:		64bit by default, can be overriden for 32bit VMs
+ * TAC:		Trap ACTLR
+ * TSC:		Trap SMC
+ * TSW:		Trap cache operations by set/way
+ * TWI:		Trap WFI
+ * TIDCP:	Trap L2CTLR/L2ECTLR
+ * BSU_IS:	Upgrade barriers to the inner shareable domain
+ * FB:		Force broadcast of all maintainance operations
+ * AMO:		Override CPSR.A and enable signaling with VA
+ * IMO:		Override CPSR.I and enable signaling with VI
+ * FMO:		Override CPSR.F and enable signaling with VF
+ * SWIO:	Turn set/way invalidates into set/way clean+invalidate
+ */
+#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+			 HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+			 HCR_SWIO | HCR_TIDCP | HCR_RW)
+#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+
+/* Hyp System Control Register (SCTLR_EL2) bits */
+#define SCTLR_EL2_EE (1 << 25)
+#define SCTLR_EL2_WXN (1 << 19)
+#define SCTLR_EL2_I (1 << 12)
+#define SCTLR_EL2_SA (1 << 3)
+#define SCTLR_EL2_C (1 << 2)
+#define SCTLR_EL2_A (1 << 1)
+#define SCTLR_EL2_M 1
+#define SCTLR_EL2_FLAGS (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C | \
+			 SCTLR_EL2_SA | SCTLR_EL2_I)
+
+/* TCR_EL2 Registers bits */
+#define TCR_EL2_TBI (1 << 20)
+#define TCR_EL2_PS (7 << 16)
+#define TCR_EL2_PS_40B (2 << 16)
+#define TCR_EL2_TG0 (1 << 14)
+#define TCR_EL2_SH0 (3 << 12)
+#define TCR_EL2_ORGN0 (3 << 10)
+#define TCR_EL2_IRGN0 (3 << 8)
+#define TCR_EL2_T0SZ 0x3f
+#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
+		      TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
+
+#define TCR_EL2_FLAGS (TCR_EL2_PS_40B)
+
+/* VTCR_EL2 Registers bits */
+#define VTCR_EL2_PS_MASK (7 << 16)
+#define VTCR_EL2_PS_40B (2 << 16)
+#define VTCR_EL2_TG0_MASK (1 << 14)
+#define VTCR_EL2_TG0_4K (0 << 14)
+#define VTCR_EL2_TG0_64K (1 << 14)
+#define VTCR_EL2_SH0_MASK (3 << 12)
+#define VTCR_EL2_SH0_INNER (3 << 12)
+#define VTCR_EL2_ORGN0_MASK (3 << 10)
+#define VTCR_EL2_ORGN0_WBWA (1 << 10)
+#define VTCR_EL2_IRGN0_MASK (3 << 8)
+#define VTCR_EL2_IRGN0_WBWA (1 << 8)
+#define VTCR_EL2_SL0_MASK (3 << 6)
+#define VTCR_EL2_SL0_LVL1 (1 << 6)
+#define VTCR_EL2_T0SZ_MASK 0x3f
+#define VTCR_EL2_T0SZ_40B 24
+
+#ifdef CONFIG_ARM64_64K_PAGES
+/*
+ * Stage2 translation configuration:
+ * 40bits output (PS = 2)
+ * 40bits input (T0SZ = 24)
+ * 64kB pages (TG0 = 1)
+ * 2 level page tables (SL = 1)
+ */
+#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_64K | \
+			VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
+			VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
+			VTCR_EL2_T0SZ_40B)
+#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
+#else
+/*
+ * Stage2 translation configuration:
+ * 40bits output (PS = 2)
+ * 40bits input (T0SZ = 24)
+ * 4kB pages (TG0 = 0)
+ * 3 level page tables (SL = 1)
+ */
+#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_4K | \
+			VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
+			VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
+			VTCR_EL2_T0SZ_40B)
+#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
+#endif
+
+#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
+#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT (48LLU)
+#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
+
+/* Hyp System Trap Register */
+#define HSTR_EL2_TTEE (1 << 16)
+#define HSTR_EL2_T(x) (1 << x)
+
+/* Hyp Coprocessor Trap Register */
+#define CPTR_EL2_TCPAC (1 << 31)
+#define CPTR_EL2_TTA (1 << 20)
+#define CPTR_EL2_TFP (1 << 10)
+
+/* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_TDRA (1 << 11)
+#define MDCR_EL2_TDOSA (1 << 10)
+#define MDCR_EL2_TDA (1 << 9)
+#define MDCR_EL2_TDE (1 << 8)
+#define MDCR_EL2_HPME (1 << 7)
+#define MDCR_EL2_TPM (1 << 6)
+#define MDCR_EL2_TPMCR (1 << 5)
+#define MDCR_EL2_HPMN_MASK (0x1F)
+
+/* Exception Syndrome Register (ESR) bits */
+#define ESR_EL2_EC_SHIFT (26)
+#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
+#define ESR_EL2_IL (1U << 25)
+#define ESR_EL2_ISS (ESR_EL2_IL - 1)
+#define ESR_EL2_ISV_SHIFT (24)
+#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
+#define ESR_EL2_SAS_SHIFT (22)
+#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
+#define ESR_EL2_SSE (1 << 21)
+#define ESR_EL2_SRT_SHIFT (16)
+#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
+#define ESR_EL2_SF (1 << 15)
+#define ESR_EL2_AR (1 << 14)
+#define ESR_EL2_EA (1 << 9)
+#define ESR_EL2_CM (1 << 8)
+#define ESR_EL2_S1PTW (1 << 7)
+#define ESR_EL2_WNR (1 << 6)
+#define ESR_EL2_FSC (0x3f)
+#define ESR_EL2_FSC_TYPE (0x3c)
+
+#define ESR_EL2_CV_SHIFT (24)
+#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
+#define ESR_EL2_COND_SHIFT (20)
+#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
+
+
+#define FSC_FAULT (0x04)
+#define FSC_PERM (0x0c)
+
+/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
+#define HPFAR_MASK (~0xFUL)
+
+#define ESR_EL2_EC_UNKNOWN (0x00)
+#define ESR_EL2_EC_WFI (0x01)
+#define ESR_EL2_EC_CP15_32 (0x03)
+#define ESR_EL2_EC_CP15_64 (0x04)
+#define ESR_EL2_EC_CP14_MR (0x05)
+#define ESR_EL2_EC_CP14_LS (0x06)
+#define ESR_EL2_EC_FP_ASIMD (0x07)
+#define ESR_EL2_EC_CP10_ID (0x08)
+#define ESR_EL2_EC_CP14_64 (0x0C)
+#define ESR_EL2_EC_ILL_ISS (0x0E)
+#define ESR_EL2_EC_SVC32 (0x11)
+#define ESR_EL2_EC_HVC32 (0x12)
+#define ESR_EL2_EC_SMC32 (0x13)
+#define ESR_EL2_EC_SVC64 (0x15)
+#define ESR_EL2_EC_HVC64 (0x16)
+#define ESR_EL2_EC_SMC64 (0x17)
+#define ESR_EL2_EC_SYS64 (0x18)
+#define ESR_EL2_EC_IABT (0x20)
+#define ESR_EL2_EC_IABT_HYP (0x21)
+#define ESR_EL2_EC_PC_ALIGN (0x22)
+#define ESR_EL2_EC_DABT (0x24)
+#define ESR_EL2_EC_DABT_HYP (0x25)
+#define ESR_EL2_EC_SP_ALIGN (0x26)
+#define ESR_EL2_EC_FP_EXC32 (0x28)
+#define ESR_EL2_EC_FP_EXC64 (0x2C)
+#define ESR_EL2_EC_SERRROR (0x2F)
+#define ESR_EL2_EC_BREAKPT (0x30)
+#define ESR_EL2_EC_BREAKPT_HYP (0x31)
+#define ESR_EL2_EC_SOFTSTP (0x32)
+#define ESR_EL2_EC_SOFTSTP_HYP (0x33)
+#define ESR_EL2_EC_WATCHPT (0x34)
+#define ESR_EL2_EC_WATCHPT_HYP (0x35)
+#define ESR_EL2_EC_BKPT32 (0x38)
+#define ESR_EL2_EC_VECTOR32 (0x3A)
+#define ESR_EL2_EC_BRK64 (0x3C)
+
+#define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10
+
+#endif /* __ARM64_KVM_ARM_H__ */
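The VTTBR_X values encode how many low bits of the stage-2 pgd address fold into the VTTBR base-address field for a 40-bit IPA space (T0SZ = 24). A self-contained sketch reproducing the 4K-granule arithmetic exactly as the macros above define it:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int t0sz = 24;			/* 40-bit IPA: T0SZ = 64 - 40 */
	int x = 37 - t0sz;		/* 4KB granule, SL0 = 1 (3-level) */
	uint64_t baddr_mask = ((1ULL << (40 - x)) - 1) << (x - 1);

	printf("VTTBR_X = %d\n", x);
	printf("VTTBR_BADDR_MASK = 0x%016llx\n",
	       (unsigned long long)baddr_mask);
	return 0;
}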
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
new file mode 100644
index 000000000000..c92de4163eba
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_ASM_H__
+#define __ARM_KVM_ASM_H__
+
+/*
+ * 0 is reserved as an invalid value.
+ * Order *must* be kept in sync with the hyp switch code.
+ */
+#define MPIDR_EL1 1	/* MultiProcessor Affinity Register */
+#define CSSELR_EL1 2	/* Cache Size Selection Register */
+#define SCTLR_EL1 3	/* System Control Register */
+#define ACTLR_EL1 4	/* Auxilliary Control Register */
+#define CPACR_EL1 5	/* Coprocessor Access Control */
+#define TTBR0_EL1 6	/* Translation Table Base Register 0 */
+#define TTBR1_EL1 7	/* Translation Table Base Register 1 */
+#define TCR_EL1 8	/* Translation Control Register */
+#define ESR_EL1 9	/* Exception Syndrome Register */
+#define AFSR0_EL1 10	/* Auxilary Fault Status Register 0 */
+#define AFSR1_EL1 11	/* Auxilary Fault Status Register 1 */
+#define FAR_EL1 12	/* Fault Address Register */
+#define MAIR_EL1 13	/* Memory Attribute Indirection Register */
+#define VBAR_EL1 14	/* Vector Base Address Register */
+#define CONTEXTIDR_EL1 15	/* Context ID Register */
+#define TPIDR_EL0 16	/* Thread ID, User R/W */
+#define TPIDRRO_EL0 17	/* Thread ID, User R/O */
+#define TPIDR_EL1 18	/* Thread ID, Privileged */
+#define AMAIR_EL1 19	/* Aux Memory Attribute Indirection Register */
+#define CNTKCTL_EL1 20	/* Timer Control Register (EL1) */
+/* 32bit specific registers. Keep them at the end of the range */
+#define DACR32_EL2 21	/* Domain Access Control Register */
+#define IFSR32_EL2 22	/* Instruction Fault Status Register */
+#define FPEXC32_EL2 23	/* Floating-Point Exception Control Register */
+#define DBGVCR32_EL2 24	/* Debug Vector Catch Register */
+#define TEECR32_EL1 25	/* ThumbEE Configuration Register */
+#define TEEHBR32_EL1 26	/* ThumbEE Handler Base Register */
+#define NR_SYS_REGS 27
+
+/* 32bit mapping */
+#define c0_MPIDR (MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
+#define c0_CSSELR (CSSELR_EL1 * 2)	/* Cache Size Selection Register */
+#define c1_SCTLR (SCTLR_EL1 * 2)	/* System Control Register */
+#define c1_ACTLR (ACTLR_EL1 * 2)	/* Auxiliary Control Register */
+#define c1_CPACR (CPACR_EL1 * 2)	/* Coprocessor Access Control */
+#define c2_TTBR0 (TTBR0_EL1 * 2)	/* Translation Table Base Register 0 */
+#define c2_TTBR0_high (c2_TTBR0 + 1)	/* TTBR0 top 32 bits */
+#define c2_TTBR1 (TTBR1_EL1 * 2)	/* Translation Table Base Register 1 */
+#define c2_TTBR1_high (c2_TTBR1 + 1)	/* TTBR1 top 32 bits */
+#define c2_TTBCR (TCR_EL1 * 2)	/* Translation Table Base Control R. */
+#define c3_DACR (DACR32_EL2 * 2)	/* Domain Access Control Register */
+#define c5_DFSR (ESR_EL1 * 2)	/* Data Fault Status Register */
+#define c5_IFSR (IFSR32_EL2 * 2)	/* Instruction Fault Status Register */
+#define c5_ADFSR (AFSR0_EL1 * 2)	/* Auxiliary Data Fault Status R */
+#define c5_AIFSR (AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
+#define c6_DFAR (FAR_EL1 * 2)	/* Data Fault Address Register */
+#define c6_IFAR (c6_DFAR + 1)	/* Instruction Fault Address Register */
+#define c10_PRRR (MAIR_EL1 * 2)	/* Primary Region Remap Register */
+#define c10_NMRR (c10_PRRR + 1)	/* Normal Memory Remap Register */
+#define c12_VBAR (VBAR_EL1 * 2)	/* Vector Base Address Register */
+#define c13_CID (CONTEXTIDR_EL1 * 2)	/* Context ID Register */
+#define c13_TID_URW (TPIDR_EL0 * 2)	/* Thread ID, User R/W */
+#define c13_TID_URO (TPIDRRO_EL0 * 2)	/* Thread ID, User R/O */
+#define c13_TID_PRIV (TPIDR_EL1 * 2)	/* Thread ID, Privileged */
+#define c10_AMAIR (AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
+#define c14_CNTKCTL (CNTKCTL_EL1 * 2)	/* Timer Control Register (PL1) */
+#define NR_CP15_REGS (NR_SYS_REGS * 2)
+
+#define ARM_EXCEPTION_IRQ 0
+#define ARM_EXCEPTION_TRAP 1
+
+#ifndef __ASSEMBLY__
+struct kvm;
+struct kvm_vcpu;
+
+extern char __kvm_hyp_init[];
+extern char __kvm_hyp_init_end[];
+
+extern char __kvm_hyp_vector[];
+
+extern char __kvm_hyp_code_start[];
+extern char __kvm_hyp_code_end[];
+
+extern void __kvm_flush_vm_context(void);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+#endif
+
+#endif /* __ARM_KVM_ASM_H__ */
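Each 64-bit system register occupies two consecutive 32-bit slots in the shared save area, which is why every 32-bit index above is its EL1 counterpart times two and NR_CP15_REGS is NR_SYS_REGS * 2. A small self-contained sketch of the index math:

#include <stdio.h>

#define TTBR0_EL1	6
#define NR_SYS_REGS	27

#define c2_TTBR0	(TTBR0_EL1 * 2)
#define c2_TTBR0_high	(c2_TTBR0 + 1)	/* top 32 bits of the same u64 */
#define NR_CP15_REGS	(NR_SYS_REGS * 2)

int main(void)
{
	printf("c2_TTBR0 = %d, c2_TTBR0_high = %d, NR_CP15_REGS = %d\n",
	       c2_TTBR0, c2_TTBR0_high, NR_CP15_REGS);
	return 0;
}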
diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h
new file mode 100644
index 000000000000..9a59301cd014
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/include/asm/kvm_coproc.h
+ * Copyright (C) 2012 Rusty Russell IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_COPROC_H__
+#define __ARM64_KVM_COPROC_H__
+
+#include <linux/kvm_host.h>
+
+void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
+
+struct kvm_sys_reg_table {
+	const struct sys_reg_desc *table;
+	size_t num;
+};
+
+struct kvm_sys_reg_target_table {
+	struct kvm_sys_reg_table table64;
+	struct kvm_sys_reg_table table32;
+};
+
+void kvm_register_target_sys_reg_table(unsigned int target,
+				       struct kvm_sys_reg_target_table *table);
+
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+#define kvm_coproc_table_init kvm_sys_reg_table_init
+void kvm_sys_reg_table_init(void);
+
+struct kvm_one_reg;
+int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
+
+#endif /* __ARM64_KVM_COPROC_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
new file mode 100644
index 000000000000..eec073875218
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/include/kvm_emulate.h
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_EMULATE_H__
+#define __ARM64_KVM_EMULATE_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmio.h>
+#include <asm/ptrace.h>
+
+unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
+
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
+
+void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+
+static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+{
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
+}
+
+static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
+{
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
+}
+
+static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
+{
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
+}
+
+static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+{
+	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
+}
+
+static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+{
+	if (vcpu_mode_is_32bit(vcpu))
+		return kvm_condition_valid32(vcpu);
+
+	return true;
+}
+
+static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+	if (vcpu_mode_is_32bit(vcpu))
+		kvm_skip_instr32(vcpu, is_wide_instr);
+	else
+		*vcpu_pc(vcpu) += 4;
+}
+
+static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
+{
+	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
+}
+
+static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+{
+	if (vcpu_mode_is_32bit(vcpu))
+		return vcpu_reg32(vcpu, reg_num);
+
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+}
+
+/* Get vcpu SPSR for current mode */
+static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
+{
+	if (vcpu_mode_is_32bit(vcpu))
+		return vcpu_spsr32(vcpu);
+
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
+}
+
+static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
+{
+	u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
+
+	if (vcpu_mode_is_32bit(vcpu))
+		return mode > COMPAT_PSR_MODE_USR;
+
+	return mode != PSR_MODE_EL0t;
+}
+
+static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.esr_el2;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.far_el2;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+{
+	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
+}
+
+static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
+}
+
+static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+{
+	return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
+}
+
+static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+{
+	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
+}
+
+static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
+}
+
+#endif /* __ARM64_KVM_EMULATE_H__ */
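kvm_vcpu_dabt_get_as() turns the two-bit SAS syndrome field into an access size in bytes: 1 << SAS gives 1, 2, 4 or 8. A self-contained sketch decoding a sample syndrome value (the sample ESR is made up for illustration):

#include <stdio.h>

#define ESR_EL2_SAS_SHIFT	22
#define ESR_EL2_SAS		(3U << ESR_EL2_SAS_SHIFT)

static int access_size(unsigned int esr)
{
	return 1 << ((esr & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
}

int main(void)
{
	unsigned int esr = 2U << ESR_EL2_SAS_SHIFT;	/* SAS = 2: word access */

	printf("access size = %d bytes\n", access_size(esr));
	return 0;
}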
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h new file mode 100644 index 000000000000..644d73956864 --- /dev/null +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -0,0 +1,202 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/include/asm/kvm_host.h: | ||
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | |||
22 | #ifndef __ARM64_KVM_HOST_H__ | ||
23 | #define __ARM64_KVM_HOST_H__ | ||
24 | |||
25 | #include <asm/kvm.h> | ||
26 | #include <asm/kvm_asm.h> | ||
27 | #include <asm/kvm_mmio.h> | ||
28 | |||
29 | #define KVM_MAX_VCPUS 4 | ||
30 | #define KVM_USER_MEM_SLOTS 32 | ||
31 | #define KVM_PRIVATE_MEM_SLOTS 4 | ||
32 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | ||
33 | |||
34 | #include <kvm/arm_vgic.h> | ||
35 | #include <kvm/arm_arch_timer.h> | ||
36 | |||
37 | #define KVM_VCPU_MAX_FEATURES 2 | ||
38 | |||
39 | /* We don't currently support large pages. */ | ||
40 | #define KVM_HPAGE_GFN_SHIFT(x) 0 | ||
41 | #define KVM_NR_PAGE_SIZES 1 | ||
42 | #define KVM_PAGES_PER_HPAGE(x) (1UL<<31) | ||
43 | |||
44 | struct kvm_vcpu; | ||
45 | int kvm_target_cpu(void); | ||
46 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); | ||
47 | int kvm_arch_dev_ioctl_check_extension(long ext); | ||
48 | |||
49 | struct kvm_arch { | ||
50 | /* The VMID generation used for the virt. memory system */ | ||
51 | u64 vmid_gen; | ||
52 | u32 vmid; | ||
53 | |||
54 | /* 1-level 2nd stage table and lock */ | ||
55 | spinlock_t pgd_lock; | ||
56 | pgd_t *pgd; | ||
57 | |||
58 | /* VTTBR value associated with above pgd and vmid */ | ||
59 | u64 vttbr; | ||
60 | |||
61 | /* Interrupt controller */ | ||
62 | struct vgic_dist vgic; | ||
63 | |||
64 | /* Timer */ | ||
65 | struct arch_timer_kvm timer; | ||
66 | }; | ||
67 | |||
68 | #define KVM_NR_MEM_OBJS 40 | ||
69 | |||
70 | /* | ||
71 | * We don't want allocation failures within the mmu code, so we preallocate | ||
72 | * enough memory for a single page fault in a cache. | ||
73 | */ | ||
74 | struct kvm_mmu_memory_cache { | ||
75 | int nobjs; | ||
76 | void *objects[KVM_NR_MEM_OBJS]; | ||
77 | }; | ||
78 | |||
79 | struct kvm_vcpu_fault_info { | ||
80 | u32 esr_el2; /* Hyp Syndrom Register */ | ||
81 | u64 far_el2; /* Hyp Fault Address Register */ | ||
82 | u64 hpfar_el2; /* Hyp IPA Fault Address Register */ | ||
83 | }; | ||
84 | |||
85 | struct kvm_cpu_context { | ||
86 | struct kvm_regs gp_regs; | ||
87 | union { | ||
88 | u64 sys_regs[NR_SYS_REGS]; | ||
89 | u32 cp15[NR_CP15_REGS]; | ||
90 | }; | ||
91 | }; | ||
92 | |||
93 | typedef struct kvm_cpu_context kvm_cpu_context_t; | ||
94 | |||
95 | struct kvm_vcpu_arch { | ||
96 | struct kvm_cpu_context ctxt; | ||
97 | |||
98 | /* HYP configuration */ | ||
99 | u64 hcr_el2; | ||
100 | |||
101 | /* Exception Information */ | ||
102 | struct kvm_vcpu_fault_info fault; | ||
103 | |||
104 | /* Pointer to host CPU context */ | ||
105 | kvm_cpu_context_t *host_cpu_context; | ||
106 | |||
107 | /* VGIC state */ | ||
108 | struct vgic_cpu vgic_cpu; | ||
109 | struct arch_timer_cpu timer_cpu; | ||
110 | |||
111 | /* | ||
112 | * Anything that is not used directly from assembly code goes | ||
113 | * here. | ||
114 | */ | ||
115 | /* dcache set/way operation pending */ | ||
116 | int last_pcpu; | ||
117 | cpumask_t require_dcache_flush; | ||
118 | |||
119 | /* Don't run the guest */ | ||
120 | bool pause; | ||
121 | |||
122 | /* IO related fields */ | ||
123 | struct kvm_decode mmio_decode; | ||
124 | |||
125 | /* Interrupt related fields */ | ||
126 | u64 irq_lines; /* IRQ and FIQ levels */ | ||
127 | |||
128 | /* Cache some mmu pages needed inside spinlock regions */ | ||
129 | struct kvm_mmu_memory_cache mmu_page_cache; | ||
130 | |||
131 | /* Target CPU and feature flags */ | ||
132 | u32 target; | ||
133 | DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); | ||
134 | |||
135 | /* Detect first run of a vcpu */ | ||
136 | bool has_run_once; | ||
137 | }; | ||
138 | |||
139 | #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) | ||
140 | #define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)]) | ||
141 | #define vcpu_cp15(v,r) ((v)->arch.ctxt.cp15[(r)]) | ||
142 | |||
143 | struct kvm_vm_stat { | ||
144 | u32 remote_tlb_flush; | ||
145 | }; | ||
146 | |||
147 | struct kvm_vcpu_stat { | ||
148 | u32 halt_wakeup; | ||
149 | }; | ||
150 | |||
151 | struct kvm_vcpu_init; | ||
152 | int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, | ||
153 | const struct kvm_vcpu_init *init); | ||
154 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); | ||
155 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); | ||
156 | struct kvm_one_reg; | ||
157 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | ||
158 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | ||
159 | |||
160 | #define KVM_ARCH_WANT_MMU_NOTIFIER | ||
161 | struct kvm; | ||
162 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | ||
163 | int kvm_unmap_hva_range(struct kvm *kvm, | ||
164 | unsigned long start, unsigned long end); | ||
165 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | ||
166 | |||
167 | /* We do not have shadow page tables, hence the empty hooks */ | ||
168 | static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva) | ||
169 | { | ||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) | ||
174 | { | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void); | ||
179 | struct kvm_vcpu __percpu **kvm_get_running_vcpus(void); | ||
180 | |||
181 | u64 kvm_call_hyp(void *hypfn, ...); | ||
182 | |||
183 | int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
184 | int exception_index); | ||
185 | |||
186 | int kvm_perf_init(void); | ||
187 | int kvm_perf_teardown(void); | ||
188 | |||
189 | static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, | ||
190 | phys_addr_t pgd_ptr, | ||
191 | unsigned long hyp_stack_ptr, | ||
192 | unsigned long vector_ptr) | ||
193 | { | ||
194 | /* | ||
195 | * Call initialization code, and switch to the full-blown | ||
196 | * HYP code. | ||
197 | */ | ||
198 | kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr, | ||
199 | hyp_stack_ptr, vector_ptr); | ||
200 | } | ||
201 | |||
202 | #endif /* __ARM64_KVM_HOST_H__ */ | ||
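The vcpu_gp_regs/vcpu_sys_reg/vcpu_cp15 macros above are the sanctioned way into the saved guest context. A minimal sketch of register-access code built on them — the demo_* names are illustrative, and MPIDR_EL1 is assumed to be one of the sys_regs[] indices defined in kvm_asm.h, which is not shown in this hunk:

/* Hedged sketch: read/write a guest system register through the
 * accessor macros above, rather than poking ctxt.sys_regs directly.
 */
static u64 demo_read_mpidr(struct kvm_vcpu *vcpu)
{
	return vcpu_sys_reg(vcpu, MPIDR_EL1);
}

static void demo_write_mpidr(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu_sys_reg(vcpu, MPIDR_EL1) = val;
}

Keeping callers behind these macros lets the union with cp15[] remain an implementation detail of kvm_cpu_context.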
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h new file mode 100644 index 000000000000..fc2f689c0694 --- /dev/null +++ b/arch/arm64/include/asm/kvm_mmio.h | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __ARM64_KVM_MMIO_H__ | ||
19 | #define __ARM64_KVM_MMIO_H__ | ||
20 | |||
21 | #include <linux/kvm_host.h> | ||
22 | #include <asm/kvm_asm.h> | ||
23 | #include <asm/kvm_arm.h> | ||
24 | |||
25 | /* | ||
26 | * This is annoying. The mmio code requires this, even if we don't | ||
27 | * need any decoding. To be fixed. | ||
28 | */ | ||
29 | struct kvm_decode { | ||
30 | unsigned long rt; | ||
31 | bool sign_extend; | ||
32 | }; | ||
33 | |||
34 | /* | ||
35 | * The in-kernel MMIO emulation code wants to use a copy of run->mmio, | ||
36 | * which is an anonymous type. Use our own type instead. | ||
37 | */ | ||
38 | struct kvm_exit_mmio { | ||
39 | phys_addr_t phys_addr; | ||
40 | u8 data[8]; | ||
41 | u32 len; | ||
42 | bool is_write; | ||
43 | }; | ||
44 | |||
45 | static inline void kvm_prepare_mmio(struct kvm_run *run, | ||
46 | struct kvm_exit_mmio *mmio) | ||
47 | { | ||
48 | run->mmio.phys_addr = mmio->phys_addr; | ||
49 | run->mmio.len = mmio->len; | ||
50 | run->mmio.is_write = mmio->is_write; | ||
51 | memcpy(run->mmio.data, mmio->data, mmio->len); | ||
52 | run->exit_reason = KVM_EXIT_MMIO; | ||
53 | } | ||
54 | |||
55 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
56 | int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
57 | phys_addr_t fault_ipa); | ||
58 | |||
59 | #endif /* __ARM64_KVM_MMIO_H__ */ | ||
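kvm_prepare_mmio() is the hand-off point to userspace: it copies a decoded access into run->mmio and flags the exit reason. A hedged sketch of the calling pattern an abort handler might use — demo_forward_mmio is hypothetical, not the patch's mmio.c, and it assumes kernel context for memcpy:

/* Sketch: package a guest MMIO access and request userspace emulation. */
static int demo_forward_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     phys_addr_t ipa, const void *val, u32 len,
			     bool is_write)
{
	struct kvm_exit_mmio mmio = {
		.phys_addr = ipa,
		.len = len,
		.is_write = is_write,
	};

	if (is_write)
		memcpy(mmio.data, val, len);

	kvm_prepare_mmio(run, &mmio);	/* sets run->exit_reason = KVM_EXIT_MMIO */
	return 0;			/* drop back to userspace for emulation */
}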
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h new file mode 100644 index 000000000000..efe609c6a3c9 --- /dev/null +++ b/arch/arm64/include/asm/kvm_mmu.h | |||
@@ -0,0 +1,135 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __ARM64_KVM_MMU_H__ | ||
19 | #define __ARM64_KVM_MMU_H__ | ||
20 | |||
21 | #include <asm/page.h> | ||
22 | #include <asm/memory.h> | ||
23 | |||
24 | /* | ||
25 | * As we only have the TTBR0_EL2 register, we cannot express | ||
26 | * "negative" addresses. This makes it impossible to directly share | ||
27 | * mappings with the kernel. | ||
28 | * | ||
29 | * Instead, give the HYP mode its own VA region at a fixed offset from | ||
30 | * the kernel by just masking the top bits (which are all ones for a | ||
31 | * kernel address). | ||
32 | */ | ||
33 | #define HYP_PAGE_OFFSET_SHIFT VA_BITS | ||
34 | #define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1) | ||
35 | #define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK) | ||
36 | |||
37 | /* | ||
38 | * Our virtual mapping for the idmap-ed MMU-enable code. Must be | ||
39 | * shared across all the page-tables. Conveniently, we use the last | ||
40 | * possible page, where no kernel mapping will ever exist. | ||
41 | */ | ||
42 | #define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK) | ||
43 | |||
44 | #ifdef __ASSEMBLY__ | ||
45 | |||
46 | /* | ||
47 | * Convert a kernel VA into a HYP VA. | ||
48 | * reg: VA to be converted. | ||
49 | */ | ||
50 | .macro kern_hyp_va reg | ||
51 | and \reg, \reg, #HYP_PAGE_OFFSET_MASK | ||
52 | .endm | ||
53 | |||
54 | #else | ||
55 | |||
56 | #include <asm/cachetype.h> | ||
57 | #include <asm/cacheflush.h> | ||
58 | |||
59 | #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) | ||
60 | |||
61 | /* | ||
62 | * Align KVM with the kernel's view of physical memory. Should be | ||
63 | * 40-bit IPA, with PGD being 8kB aligned in the 4KB page configuration. | ||
64 | */ | ||
65 | #define KVM_PHYS_SHIFT PHYS_MASK_SHIFT | ||
66 | #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) | ||
67 | #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) | ||
68 | |||
69 | /* Make sure we get the right size, and thus the right alignment */ | ||
70 | #define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT)) | ||
71 | #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) | ||
72 | |||
73 | int create_hyp_mappings(void *from, void *to); | ||
74 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t); | ||
75 | void free_boot_hyp_pgd(void); | ||
76 | void free_hyp_pgds(void); | ||
77 | |||
78 | int kvm_alloc_stage2_pgd(struct kvm *kvm); | ||
79 | void kvm_free_stage2_pgd(struct kvm *kvm); | ||
80 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | ||
81 | phys_addr_t pa, unsigned long size); | ||
82 | |||
83 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
84 | |||
85 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); | ||
86 | |||
87 | phys_addr_t kvm_mmu_get_httbr(void); | ||
88 | phys_addr_t kvm_mmu_get_boot_httbr(void); | ||
89 | phys_addr_t kvm_get_idmap_vector(void); | ||
90 | int kvm_mmu_init(void); | ||
91 | void kvm_clear_hyp_idmap(void); | ||
92 | |||
93 | #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) | ||
94 | |||
95 | static inline bool kvm_is_write_fault(unsigned long esr) | ||
96 | { | ||
97 | unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT; | ||
98 | |||
99 | if (esr_ec == ESR_EL2_EC_IABT) | ||
100 | return false; | ||
101 | |||
102 | if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR)) | ||
103 | return false; | ||
104 | |||
105 | return true; | ||
106 | } | ||
107 | |||
108 | static inline void kvm_clean_dcache_area(void *addr, size_t size) {} | ||
109 | static inline void kvm_clean_pgd(pgd_t *pgd) {} | ||
110 | static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} | ||
111 | static inline void kvm_clean_pte(pte_t *pte) {} | ||
112 | static inline void kvm_clean_pte_entry(pte_t *pte) {} | ||
113 | |||
114 | static inline void kvm_set_s2pte_writable(pte_t *pte) | ||
115 | { | ||
116 | pte_val(*pte) |= PTE_S2_RDWR; | ||
117 | } | ||
118 | |||
119 | struct kvm; | ||
120 | |||
121 | static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) | ||
122 | { | ||
123 | if (!icache_is_aliasing()) { /* PIPT */ | ||
124 | unsigned long hva = gfn_to_hva(kvm, gfn); | ||
125 | flush_icache_range(hva, hva + PAGE_SIZE); | ||
126 | } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ | ||
127 | /* any kind of VIPT cache */ | ||
128 | __flush_icache_all(); | ||
129 | } | ||
130 | } | ||
131 | |||
132 | #define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) | ||
133 | |||
134 | #endif /* __ASSEMBLY__ */ | ||
135 | #endif /* __ARM64_KVM_MMU_H__ */ | ||
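The kern_hyp_va trick is a single AND: clearing the sign-extension bits turns a kernel VA into its HYP alias. For VA_BITS = 39 (an assumed configuration), HYP_PAGE_OFFSET_MASK is 0x7fffffffff, so 0xffffffc000100000 becomes 0x4000100000. A standalone sanity check of the same arithmetic in plain C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned va_bits = 39;			/* assumed VA_BITS */
	uint64_t mask = (1ULL << va_bits) - 1;		/* HYP_PAGE_OFFSET_MASK */
	uint64_t kva = 0xffffffc000100000ULL;		/* example kernel VA */

	/* kern_hyp_va: and \reg, \reg, #HYP_PAGE_OFFSET_MASK */
	printf("hyp va = 0x%llx\n", (unsigned long long)(kva & mask));
	return 0;					/* prints 0x4000100000 */
}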
diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h new file mode 100644 index 000000000000..e301a4816355 --- /dev/null +++ b/arch/arm64/include/asm/kvm_psci.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __ARM64_KVM_PSCI_H__ | ||
19 | #define __ARM64_KVM_PSCI_H__ | ||
20 | |||
21 | bool kvm_psci_call(struct kvm_vcpu *vcpu); | ||
22 | |||
23 | #endif /* __ARM64_KVM_PSCI_H__ */ | ||
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 381f556b664e..20925bcf4e2a 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
@@ -90,6 +90,12 @@ | |||
90 | #define MT_NORMAL_NC 3 | 90 | #define MT_NORMAL_NC 3 |
91 | #define MT_NORMAL 4 | 91 | #define MT_NORMAL 4 |
92 | 92 | ||
93 | /* | ||
94 | * Memory types for Stage-2 translation | ||
95 | */ | ||
96 | #define MT_S2_NORMAL 0xf | ||
97 | #define MT_S2_DEVICE_nGnRE 0x1 | ||
98 | |||
93 | #ifndef __ASSEMBLY__ | 99 | #ifndef __ASSEMBLY__ |
94 | 100 | ||
95 | extern phys_addr_t memstart_addr; | 101 | extern phys_addr_t memstart_addr; |
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index e2bc385adb6b..a9eee33dfa62 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h | |||
@@ -151,12 +151,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
151 | { | 151 | { |
152 | unsigned int cpu = smp_processor_id(); | 152 | unsigned int cpu = smp_processor_id(); |
153 | 153 | ||
154 | #ifdef CONFIG_SMP | ||
155 | /* check for possible thread migration */ | ||
156 | if (!cpumask_empty(mm_cpumask(next)) && | ||
157 | !cpumask_test_cpu(cpu, mm_cpumask(next))) | ||
158 | __flush_icache_all(); | ||
159 | #endif | ||
160 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) | 154 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) |
161 | check_and_switch_context(next, tsk); | 155 | check_and_switch_context(next, tsk); |
162 | } | 156 | } |
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 75fd13d289b9..e182a356c979 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h | |||
@@ -25,16 +25,27 @@ | |||
25 | /* | 25 | /* |
26 | * Hardware page table definitions. | 26 | * Hardware page table definitions. |
27 | * | 27 | * |
28 | * Level 1 descriptor (PUD). | ||
29 | */ | ||
30 | |||
31 | #define PUD_TABLE_BIT (_AT(pgdval_t, 1) << 1) | ||
32 | |||
33 | /* | ||
28 | * Level 2 descriptor (PMD). | 34 | * Level 2 descriptor (PMD). |
29 | */ | 35 | */ |
30 | #define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0) | 36 | #define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0) |
31 | #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) | 37 | #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) |
32 | #define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0) | 38 | #define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0) |
33 | #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) | 39 | #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) |
40 | #define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1) | ||
34 | 41 | ||
35 | /* | 42 | /* |
36 | * Section | 43 | * Section |
37 | */ | 44 | */ |
45 | #define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) | ||
46 | #define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 2) | ||
47 | #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ | ||
48 | #define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ | ||
38 | #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) | 49 | #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) |
39 | #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) | 50 | #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) |
40 | #define PMD_SECT_NG (_AT(pmdval_t, 1) << 11) | 51 | #define PMD_SECT_NG (_AT(pmdval_t, 1) << 11) |
@@ -53,6 +64,7 @@ | |||
53 | #define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) | 64 | #define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) |
54 | #define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) | 65 | #define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) |
55 | #define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0) | 66 | #define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0) |
67 | #define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1) | ||
56 | #define PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */ | 68 | #define PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */ |
57 | #define PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */ | 69 | #define PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */ |
58 | #define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ | 70 | #define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ |
@@ -68,6 +80,24 @@ | |||
68 | #define PTE_ATTRINDX_MASK (_AT(pteval_t, 7) << 2) | 80 | #define PTE_ATTRINDX_MASK (_AT(pteval_t, 7) << 2) |
69 | 81 | ||
70 | /* | 82 | /* |
83 | * 2nd stage PTE definitions | ||
84 | */ | ||
85 | #define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */ | ||
86 | #define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ | ||
87 | |||
88 | /* | ||
89 | * Memory Attribute override for Stage-2 (MemAttr[3:0]) | ||
90 | */ | ||
91 | #define PTE_S2_MEMATTR(t) (_AT(pteval_t, (t)) << 2) | ||
92 | #define PTE_S2_MEMATTR_MASK (_AT(pteval_t, 0xf) << 2) | ||
93 | |||
94 | /* | ||
95 | * EL2/HYP PTE/PMD definitions | ||
96 | */ | ||
97 | #define PMD_HYP PMD_SECT_USER | ||
98 | #define PTE_HYP PTE_USER | ||
99 | |||
100 | /* | ||
71 | * 40-bit physical address supported. | 101 | * 40-bit physical address supported. |
72 | */ | 102 | */ |
73 | #define PHYS_MASK_SHIFT (40) | 103 | #define PHYS_MASK_SHIFT (40) |
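Stage-2 descriptors replace the usual AP/AttrIndx fields with HAP[2:1] permissions and a MemAttr override. A hedged sketch of composing a writable Normal-memory Stage-2 PTE value from the constants above — demo_s2_normal_rw is illustrative; PAGE_S2 in pgtable.h performs the read-only variant of the same composition on top of the default prot:

/* Sketch: Normal memory (MemAttr = 0xf) with read/write HAP bits. */
static pteval_t demo_s2_normal_rw(void)
{
	return PTE_S2_MEMATTR(MT_S2_NORMAL) |	/* MemAttr[3:0] */
	       PTE_S2_RDWR;			/* HAP[2:1] = 0b11 */
}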
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 3a768e96cf0e..f0bebc5e22cd 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
@@ -25,8 +25,8 @@ | |||
25 | * Software defined PTE bits definition. | 25 | * Software defined PTE bits definition. |
26 | */ | 26 | */ |
27 | #define PTE_VALID (_AT(pteval_t, 1) << 0) | 27 | #define PTE_VALID (_AT(pteval_t, 1) << 0) |
28 | #define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */ | 28 | #define PTE_PROT_NONE (_AT(pteval_t, 1) << 2) /* only when !PTE_VALID */ |
29 | #define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */ | 29 | #define PTE_FILE (_AT(pteval_t, 1) << 3) /* only when !pte_present() */ |
30 | #define PTE_DIRTY (_AT(pteval_t, 1) << 55) | 30 | #define PTE_DIRTY (_AT(pteval_t, 1) << 55) |
31 | #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) | 31 | #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) |
32 | 32 | ||
@@ -66,7 +66,7 @@ extern pgprot_t pgprot_default; | |||
66 | 66 | ||
67 | #define _MOD_PROT(p, b) __pgprot_modify(p, 0, b) | 67 | #define _MOD_PROT(p, b) __pgprot_modify(p, 0, b) |
68 | 68 | ||
69 | #define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE) | 69 | #define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN) |
70 | #define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) | 70 | #define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) |
71 | #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) | 71 | #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) |
72 | #define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) | 72 | #define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) |
@@ -76,7 +76,13 @@ extern pgprot_t pgprot_default; | |||
76 | #define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY) | 76 | #define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY) |
77 | #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY) | 77 | #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY) |
78 | 78 | ||
79 | #define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE) | 79 | #define PAGE_HYP _MOD_PROT(pgprot_default, PTE_HYP) |
80 | #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) | ||
81 | |||
82 | #define PAGE_S2 __pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) | ||
83 | #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN) | ||
84 | |||
85 | #define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN) | ||
80 | #define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) | 86 | #define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) |
81 | #define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) | 87 | #define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) |
82 | #define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) | 88 | #define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) |
@@ -119,7 +125,7 @@ extern struct page *empty_zero_page; | |||
119 | #define pte_none(pte) (!pte_val(pte)) | 125 | #define pte_none(pte) (!pte_val(pte)) |
120 | #define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) | 126 | #define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) |
121 | #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) | 127 | #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) |
122 | #define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) | 128 | #define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr)) |
123 | 129 | ||
124 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) | 130 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) |
125 | #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) | 131 | #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) |
@@ -173,12 +179,76 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
173 | /* | 179 | /* |
174 | * Huge pte definitions. | 180 | * Huge pte definitions. |
175 | */ | 181 | */ |
176 | #define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE) | 182 | #define pte_huge(pte) (!(pte_val(pte) & PTE_TABLE_BIT)) |
177 | #define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE)) | 183 | #define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT)) |
184 | |||
185 | /* | ||
186 | * Hugetlb definitions. | ||
187 | */ | ||
188 | #define HUGE_MAX_HSTATE 2 | ||
189 | #define HPAGE_SHIFT PMD_SHIFT | ||
190 | #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) | ||
191 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | ||
192 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | ||
178 | 193 | ||
179 | #define __HAVE_ARCH_PTE_SPECIAL | 194 | #define __HAVE_ARCH_PTE_SPECIAL |
180 | 195 | ||
181 | /* | 196 | /* |
197 | * Software PMD bits for THP | ||
198 | */ | ||
199 | |||
200 | #define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55) | ||
201 | #define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 57) | ||
202 | |||
203 | /* | ||
204 | * THP definitions. | ||
205 | */ | ||
206 | #define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF) | ||
207 | |||
208 | #define __HAVE_ARCH_PMD_WRITE | ||
209 | #define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY)) | ||
210 | |||
211 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
212 | #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) | ||
213 | #define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING) | ||
214 | #endif | ||
215 | |||
216 | #define PMD_BIT_FUNC(fn,op) \ | ||
217 | static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; } | ||
218 | |||
219 | PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY); | ||
220 | PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF); | ||
221 | PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING); | ||
222 | PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY); | ||
223 | PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY); | ||
224 | PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); | ||
225 | PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK); | ||
226 | |||
227 | #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) | ||
228 | |||
229 | #define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT) | ||
230 | #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) | ||
231 | #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) | ||
232 | |||
233 | #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) | ||
234 | |||
235 | static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) | ||
236 | { | ||
237 | const pmdval_t mask = PMD_SECT_USER | PMD_SECT_PXN | PMD_SECT_UXN | | ||
238 | PMD_SECT_RDONLY | PMD_SECT_PROT_NONE | | ||
239 | PMD_SECT_VALID; | ||
240 | pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask); | ||
241 | return pmd; | ||
242 | } | ||
243 | |||
244 | #define set_pmd_at(mm, addr, pmdp, pmd) set_pmd(pmdp, pmd) | ||
245 | |||
246 | static inline int has_transparent_hugepage(void) | ||
247 | { | ||
248 | return 1; | ||
249 | } | ||
250 | |||
251 | /* | ||
182 | * Mark the prot value as uncacheable and unbufferable. | 252 | * Mark the prot value as uncacheable and unbufferable. |
183 | */ | 253 | */ |
184 | #define pgprot_noncached(prot) \ | 254 | #define pgprot_noncached(prot) \ |
@@ -197,6 +267,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | |||
197 | 267 | ||
198 | #define pmd_bad(pmd) (!(pmd_val(pmd) & 2)) | 268 | #define pmd_bad(pmd) (!(pmd_val(pmd) & 2)) |
199 | 269 | ||
270 | #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ | ||
271 | PMD_TYPE_TABLE) | ||
272 | #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ | ||
273 | PMD_TYPE_SECT) | ||
274 | |||
275 | |||
200 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) | 276 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) |
201 | { | 277 | { |
202 | *pmdp = pmd; | 278 | *pmdp = pmd; |
@@ -263,7 +339,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) | |||
263 | #endif | 339 | #endif |
264 | 340 | ||
265 | /* Find an entry in the third-level page table.. */ | 341 | /* Find an entry in the third-level page table.. */ |
266 | #define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | 342 | #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) |
267 | 343 | ||
268 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 344 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
269 | { | 345 | { |
@@ -281,12 +357,12 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; | |||
281 | 357 | ||
282 | /* | 358 | /* |
283 | * Encode and decode a swap entry: | 359 | * Encode and decode a swap entry: |
284 | * bits 0-1: present (must be zero) | 360 | * bits 0, 2: present (must both be zero) |
285 | * bit 2: PTE_FILE | 361 | * bit 3: PTE_FILE |
286 | * bits 3-8: swap type | 362 | * bits 4-8: swap type |
287 | * bits 9-63: swap offset | 363 | * bits 9-63: swap offset |
288 | */ | 364 | */ |
289 | #define __SWP_TYPE_SHIFT 3 | 365 | #define __SWP_TYPE_SHIFT 4 |
290 | #define __SWP_TYPE_BITS 6 | 366 | #define __SWP_TYPE_BITS 6 |
291 | #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) | 367 | #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) |
292 | #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) | 368 | #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) |
@@ -306,15 +382,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; | |||
306 | 382 | ||
307 | /* | 383 | /* |
308 | * Encode and decode a file entry: | 384 | * Encode and decode a file entry: |
309 | * bits 0-1: present (must be zero) | 385 | * bits 0, 2: present (must both be zero) |
310 | * bit 2: PTE_FILE | 386 | * bit 3: PTE_FILE |
311 | * bits 3-63: file offset / PAGE_SIZE | 387 | * bits 4-63: file offset / PAGE_SIZE |
312 | */ | 388 | */ |
313 | #define pte_file(pte) (pte_val(pte) & PTE_FILE) | 389 | #define pte_file(pte) (pte_val(pte) & PTE_FILE) |
314 | #define pte_to_pgoff(x) (pte_val(x) >> 3) | 390 | #define pte_to_pgoff(x) (pte_val(x) >> 4) |
315 | #define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE) | 391 | #define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE) |
316 | 392 | ||
317 | #define PTE_FILE_MAX_BITS 61 | 393 | #define PTE_FILE_MAX_BITS 60 |
318 | 394 | ||
319 | extern int kern_addr_valid(unsigned long addr); | 395 | extern int kern_addr_valid(unsigned long addr); |
320 | 396 | ||
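Because PTE_PROT_NONE now occupies bit 2 and PTE_FILE bit 3, the swap encoding shifts up by one bit across the board: the type sits in bits 4-9 and the offset starts at bit 10. A self-contained check of the arithmetic, with the constants copied from the hunk above and demo_swp_entry as an illustrative stand-in for __swp_entry:

#include <assert.h>
#include <stdint.h>

#define SWP_TYPE_SHIFT		4
#define SWP_TYPE_BITS		6
#define SWP_TYPE_MASK		((1 << SWP_TYPE_BITS) - 1)
#define SWP_OFFSET_SHIFT	(SWP_TYPE_BITS + SWP_TYPE_SHIFT)	/* 10 */

static uint64_t demo_swp_entry(unsigned type, uint64_t offset)
{
	return ((uint64_t)(type & SWP_TYPE_MASK) << SWP_TYPE_SHIFT) |
	       (offset << SWP_OFFSET_SHIFT);
}

int main(void)
{
	uint64_t e = demo_swp_entry(3, 0x1234);

	assert(((e >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK) == 3);
	assert((e >> SWP_OFFSET_SHIFT) == 0x1234);
	assert((e & 0x5) == 0);	/* bits 0 and 2 clear: not pte_present() */
	return 0;
}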
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 41a71ee4c3df..0dacbbf9458b 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h | |||
@@ -171,7 +171,5 @@ extern unsigned long profile_pc(struct pt_regs *regs); | |||
171 | #define profile_pc(regs) instruction_pointer(regs) | 171 | #define profile_pc(regs) instruction_pointer(regs) |
172 | #endif | 172 | #endif |
173 | 173 | ||
174 | extern int aarch32_break_trap(struct pt_regs *regs); | ||
175 | |||
176 | #endif /* __ASSEMBLY__ */ | 174 | #endif /* __ASSEMBLY__ */ |
177 | #endif | 175 | #endif |
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 7065e920149d..0defa0728a9b 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h | |||
@@ -59,9 +59,10 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
59 | unsigned int tmp; | 59 | unsigned int tmp; |
60 | 60 | ||
61 | asm volatile( | 61 | asm volatile( |
62 | " ldaxr %w0, %1\n" | 62 | "2: ldaxr %w0, %1\n" |
63 | " cbnz %w0, 1f\n" | 63 | " cbnz %w0, 1f\n" |
64 | " stxr %w0, %w2, %1\n" | 64 | " stxr %w0, %w2, %1\n" |
65 | " cbnz %w0, 2b\n" | ||
65 | "1:\n" | 66 | "1:\n" |
66 | : "=&r" (tmp), "+Q" (lock->lock) | 67 | : "=&r" (tmp), "+Q" (lock->lock) |
67 | : "r" (1) | 68 | : "r" (1) |
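The added "cbnz %w0, 2b" closes a real hole: STXR may fail even when the lock word was observed free (the exclusive monitor can be cleared in between), and the old sequence then reported the trylock as failed on an uncontended lock. The C11 weak compare-exchange has the same spurious-failure property, so a portable analog needs the identical retry loop. Illustrative only, not kernel code:

#include <stdatomic.h>
#include <stdbool.h>

static bool demo_trylock(atomic_uint *lock)
{
	unsigned int expected;

	do {
		expected = 0;	/* mirrors "2: ldaxr; cbnz ..., 1f" */
		if (atomic_load_explicit(lock, memory_order_relaxed) != 0)
			return false;	/* genuinely held: give up */
	} while (!atomic_compare_exchange_weak_explicit(lock, &expected, 1,
							memory_order_acquire,
							memory_order_relaxed));
	return true;	/* store-exclusive equivalent succeeded */
}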
diff --git a/arch/arm64/include/asm/sync_bitops.h b/arch/arm64/include/asm/sync_bitops.h new file mode 100644 index 000000000000..8da0bf4f7659 --- /dev/null +++ b/arch/arm64/include/asm/sync_bitops.h | |||
@@ -0,0 +1,26 @@ | |||
1 | #ifndef __ASM_SYNC_BITOPS_H__ | ||
2 | #define __ASM_SYNC_BITOPS_H__ | ||
3 | |||
4 | #include <asm/bitops.h> | ||
5 | #include <asm/cmpxchg.h> | ||
6 | |||
7 | /* sync_bitops functions are equivalent to the SMP implementation of the | ||
8 | * original functions, independently of whether CONFIG_SMP is defined. | ||
9 | * | ||
10 | * We need them because _set_bit etc. are not SMP-safe if !CONFIG_SMP. But | ||
11 | * under Xen you might be communicating with a completely external entity | ||
12 | * that might be on another CPU (e.g. two uniprocessor guests communicating | ||
13 | * via event channels and grant tables). So we need a variant of the bit | ||
14 | * ops that is SMP-safe even on a UP kernel. | ||
15 | */ | ||
16 | |||
17 | #define sync_set_bit(nr, p) set_bit(nr, p) | ||
18 | #define sync_clear_bit(nr, p) clear_bit(nr, p) | ||
19 | #define sync_change_bit(nr, p) change_bit(nr, p) | ||
20 | #define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p) | ||
21 | #define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p) | ||
22 | #define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p) | ||
23 | #define sync_test_bit(nr, addr) test_bit(nr, addr) | ||
24 | #define sync_cmpxchg cmpxchg | ||
25 | |||
26 | #endif | ||
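A short usage sketch: flagging an event channel pending in a page shared with another, possibly uniprocessor, guest. demo_mark_pending is hypothetical; the point is that a plain __set_bit would not be atomic on a UP kernel, while the sync_ variant always is:

/* Sketch: atomically mark a port pending in a shared bitmap. */
static int demo_mark_pending(unsigned long *shared_pending, int port)
{
	/* returns the old bit, so the caller can skip duplicate notifies */
	return sync_test_and_set_bit(port, shared_pending);
}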
diff --git a/arch/arm64/include/asm/timex.h b/arch/arm64/include/asm/timex.h index b24a31a7e2c9..81a076eb37fa 100644 --- a/arch/arm64/include/asm/timex.h +++ b/arch/arm64/include/asm/timex.h | |||
@@ -16,14 +16,14 @@ | |||
16 | #ifndef __ASM_TIMEX_H | 16 | #ifndef __ASM_TIMEX_H |
17 | #define __ASM_TIMEX_H | 17 | #define __ASM_TIMEX_H |
18 | 18 | ||
19 | #include <asm/arch_timer.h> | ||
20 | |||
19 | /* | 21 | /* |
20 | * Use the current timer as a cycle counter since this is what we use for | 22 | * Use the current timer as a cycle counter since this is what we use for |
21 | * the delay loop. | 23 | * the delay loop. |
22 | */ | 24 | */ |
23 | #define get_cycles() ({ cycles_t c; read_current_timer(&c); c; }) | 25 | #define get_cycles() arch_counter_get_cntvct() |
24 | 26 | ||
25 | #include <asm-generic/timex.h> | 27 | #include <asm-generic/timex.h> |
26 | 28 | ||
27 | #define ARCH_HAS_READ_CURRENT_TIMER | ||
28 | |||
29 | #endif | 29 | #endif |
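get_cycles() is now a direct read of the virtual counter rather than a call through read_current_timer(). A hedged usage sketch — cycles_t comes via asm-generic/timex.h, and converting ticks to time would use the counter frequency (e.g. arch_timer_get_rate(), outside this header):

/* Sketch: measure a short region in raw counter ticks. */
static cycles_t demo_measure(void (*fn)(void))
{
	cycles_t t0 = get_cycles();

	fn();
	return get_cycles() - t0;
}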
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 654f0968030b..46b3beb4b773 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h | |||
@@ -187,4 +187,10 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | |||
187 | 187 | ||
188 | #define tlb_migrate_finish(mm) do { } while (0) | 188 | #define tlb_migrate_finish(mm) do { } while (0) |
189 | 189 | ||
190 | static inline void | ||
191 | tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) | ||
192 | { | ||
193 | tlb_add_flush(tlb, addr); | ||
194 | } | ||
195 | |||
190 | #endif | 196 | #endif |
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 122d6320f745..8b482035cfc2 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h | |||
@@ -117,6 +117,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, | |||
117 | dsb(); | 117 | dsb(); |
118 | } | 118 | } |
119 | 119 | ||
120 | #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) | ||
121 | |||
120 | #endif | 122 | #endif |
121 | 123 | ||
122 | #endif | 124 | #endif |
diff --git a/arch/arm64/include/asm/xen/events.h b/arch/arm64/include/asm/xen/events.h new file mode 100644 index 000000000000..86553213c132 --- /dev/null +++ b/arch/arm64/include/asm/xen/events.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef _ASM_ARM64_XEN_EVENTS_H | ||
2 | #define _ASM_ARM64_XEN_EVENTS_H | ||
3 | |||
4 | #include <asm/ptrace.h> | ||
5 | #include <asm/atomic.h> | ||
6 | |||
7 | enum ipi_vector { | ||
8 | XEN_PLACEHOLDER_VECTOR, | ||
9 | |||
10 | /* Xen IPIs go here */ | ||
11 | XEN_NR_IPIS, | ||
12 | }; | ||
13 | |||
14 | static inline int xen_irqs_disabled(struct pt_regs *regs) | ||
15 | { | ||
16 | return raw_irqs_disabled_flags((unsigned long) regs->pstate); | ||
17 | } | ||
18 | |||
19 | #define xchg_xen_ulong(ptr, val) xchg((ptr), (val)) | ||
20 | |||
21 | #endif /* _ASM_ARM64_XEN_EVENTS_H */ | ||
diff --git a/arch/arm64/include/asm/xen/hypercall.h b/arch/arm64/include/asm/xen/hypercall.h new file mode 100644 index 000000000000..74b0c423ff5b --- /dev/null +++ b/arch/arm64/include/asm/xen/hypercall.h | |||
@@ -0,0 +1 @@ | |||
#include <../../arm/include/asm/xen/hypercall.h> | |||
diff --git a/arch/arm64/include/asm/xen/hypervisor.h b/arch/arm64/include/asm/xen/hypervisor.h new file mode 100644 index 000000000000..f263da8e8769 --- /dev/null +++ b/arch/arm64/include/asm/xen/hypervisor.h | |||
@@ -0,0 +1 @@ | |||
#include <../../arm/include/asm/xen/hypervisor.h> | |||
diff --git a/arch/arm64/include/asm/xen/interface.h b/arch/arm64/include/asm/xen/interface.h new file mode 100644 index 000000000000..44457aebeed4 --- /dev/null +++ b/arch/arm64/include/asm/xen/interface.h | |||
@@ -0,0 +1 @@ | |||
#include <../../arm/include/asm/xen/interface.h> | |||
diff --git a/arch/arm64/include/asm/xen/page.h b/arch/arm64/include/asm/xen/page.h new file mode 100644 index 000000000000..bed87ec36780 --- /dev/null +++ b/arch/arm64/include/asm/xen/page.h | |||
@@ -0,0 +1 @@ | |||
#include <../../arm/include/asm/xen/page.h> | |||
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h new file mode 100644 index 000000000000..5031f4263937 --- /dev/null +++ b/arch/arm64/include/uapi/asm/kvm.h | |||
@@ -0,0 +1,168 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/include/uapi/asm/kvm.h: | ||
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | |||
22 | #ifndef __ARM_KVM_H__ | ||
23 | #define __ARM_KVM_H__ | ||
24 | |||
25 | #define KVM_SPSR_EL1 0 | ||
26 | #define KVM_SPSR_SVC KVM_SPSR_EL1 | ||
27 | #define KVM_SPSR_ABT 1 | ||
28 | #define KVM_SPSR_UND 2 | ||
29 | #define KVM_SPSR_IRQ 3 | ||
30 | #define KVM_SPSR_FIQ 4 | ||
31 | #define KVM_NR_SPSR 5 | ||
32 | |||
33 | #ifndef __ASSEMBLY__ | ||
34 | #include <asm/types.h> | ||
35 | #include <asm/ptrace.h> | ||
36 | |||
37 | #define __KVM_HAVE_GUEST_DEBUG | ||
38 | #define __KVM_HAVE_IRQ_LINE | ||
39 | |||
40 | #define KVM_REG_SIZE(id) \ | ||
41 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) | ||
42 | |||
43 | struct kvm_regs { | ||
44 | struct user_pt_regs regs; /* sp = sp_el0 */ | ||
45 | |||
46 | __u64 sp_el1; | ||
47 | __u64 elr_el1; | ||
48 | |||
49 | __u64 spsr[KVM_NR_SPSR]; | ||
50 | |||
51 | struct user_fpsimd_state fp_regs; | ||
52 | }; | ||
53 | |||
54 | /* Supported Processor Types */ | ||
55 | #define KVM_ARM_TARGET_AEM_V8 0 | ||
56 | #define KVM_ARM_TARGET_FOUNDATION_V8 1 | ||
57 | #define KVM_ARM_TARGET_CORTEX_A57 2 | ||
58 | |||
59 | #define KVM_ARM_NUM_TARGETS 3 | ||
60 | |||
61 | /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ | ||
62 | #define KVM_ARM_DEVICE_TYPE_SHIFT 0 | ||
63 | #define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT) | ||
64 | #define KVM_ARM_DEVICE_ID_SHIFT 16 | ||
65 | #define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT) | ||
66 | |||
67 | /* Supported device IDs */ | ||
68 | #define KVM_ARM_DEVICE_VGIC_V2 0 | ||
69 | |||
70 | /* Supported VGIC address types */ | ||
71 | #define KVM_VGIC_V2_ADDR_TYPE_DIST 0 | ||
72 | #define KVM_VGIC_V2_ADDR_TYPE_CPU 1 | ||
73 | |||
74 | #define KVM_VGIC_V2_DIST_SIZE 0x1000 | ||
75 | #define KVM_VGIC_V2_CPU_SIZE 0x2000 | ||
76 | |||
77 | #define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ | ||
78 | #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ | ||
79 | |||
80 | struct kvm_vcpu_init { | ||
81 | __u32 target; | ||
82 | __u32 features[7]; | ||
83 | }; | ||
84 | |||
85 | struct kvm_sregs { | ||
86 | }; | ||
87 | |||
88 | struct kvm_fpu { | ||
89 | }; | ||
90 | |||
91 | struct kvm_guest_debug_arch { | ||
92 | }; | ||
93 | |||
94 | struct kvm_debug_exit_arch { | ||
95 | }; | ||
96 | |||
97 | struct kvm_sync_regs { | ||
98 | }; | ||
99 | |||
100 | struct kvm_arch_memory_slot { | ||
101 | }; | ||
102 | |||
103 | /* If you need to interpret the index values, here is the key: */ | ||
104 | #define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 | ||
105 | #define KVM_REG_ARM_COPROC_SHIFT 16 | ||
106 | |||
107 | /* Normal registers are mapped as coprocessor 16. */ | ||
108 | #define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) | ||
109 | #define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / sizeof(__u32)) | ||
110 | |||
111 | /* Some registers need more space to represent values. */ | ||
112 | #define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT) | ||
113 | #define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00 | ||
114 | #define KVM_REG_ARM_DEMUX_ID_SHIFT 8 | ||
115 | #define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT) | ||
116 | #define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF | ||
117 | #define KVM_REG_ARM_DEMUX_VAL_SHIFT 0 | ||
118 | |||
119 | /* AArch64 system registers */ | ||
120 | #define KVM_REG_ARM64_SYSREG (0x0013 << KVM_REG_ARM_COPROC_SHIFT) | ||
121 | #define KVM_REG_ARM64_SYSREG_OP0_MASK 0x000000000000c000 | ||
122 | #define KVM_REG_ARM64_SYSREG_OP0_SHIFT 14 | ||
123 | #define KVM_REG_ARM64_SYSREG_OP1_MASK 0x0000000000003800 | ||
124 | #define KVM_REG_ARM64_SYSREG_OP1_SHIFT 11 | ||
125 | #define KVM_REG_ARM64_SYSREG_CRN_MASK 0x0000000000000780 | ||
126 | #define KVM_REG_ARM64_SYSREG_CRN_SHIFT 7 | ||
127 | #define KVM_REG_ARM64_SYSREG_CRM_MASK 0x0000000000000078 | ||
128 | #define KVM_REG_ARM64_SYSREG_CRM_SHIFT 3 | ||
129 | #define KVM_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007 | ||
130 | #define KVM_REG_ARM64_SYSREG_OP2_SHIFT 0 | ||
131 | |||
132 | /* KVM_IRQ_LINE irq field index values */ | ||
133 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 | ||
134 | #define KVM_ARM_IRQ_TYPE_MASK 0xff | ||
135 | #define KVM_ARM_IRQ_VCPU_SHIFT 16 | ||
136 | #define KVM_ARM_IRQ_VCPU_MASK 0xff | ||
137 | #define KVM_ARM_IRQ_NUM_SHIFT 0 | ||
138 | #define KVM_ARM_IRQ_NUM_MASK 0xffff | ||
139 | |||
140 | /* irq_type field */ | ||
141 | #define KVM_ARM_IRQ_TYPE_CPU 0 | ||
142 | #define KVM_ARM_IRQ_TYPE_SPI 1 | ||
143 | #define KVM_ARM_IRQ_TYPE_PPI 2 | ||
144 | |||
145 | /* out-of-kernel GIC cpu interrupt injection irq_number field */ | ||
146 | #define KVM_ARM_IRQ_CPU_IRQ 0 | ||
147 | #define KVM_ARM_IRQ_CPU_FIQ 1 | ||
148 | |||
149 | /* Highest supported SPI, from VGIC_NR_IRQS */ | ||
150 | #define KVM_ARM_IRQ_GIC_MAX 127 | ||
151 | |||
152 | /* PSCI interface */ | ||
153 | #define KVM_PSCI_FN_BASE 0x95c1ba5e | ||
154 | #define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) | ||
155 | |||
156 | #define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0) | ||
157 | #define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1) | ||
158 | #define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) | ||
159 | #define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) | ||
160 | |||
161 | #define KVM_PSCI_RET_SUCCESS 0 | ||
162 | #define KVM_PSCI_RET_NI ((unsigned long)-1) | ||
163 | #define KVM_PSCI_RET_INVAL ((unsigned long)-2) | ||
164 | #define KVM_PSCI_RET_DENIED ((unsigned long)-3) | ||
165 | |||
166 | #endif | ||
167 | |||
168 | #endif /* __ARM_KVM_H__ */ | ||
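A system-register ID for KVM_GET_ONE_REG/KVM_SET_ONE_REG is assembled from the Op0/Op1/CRn/CRm/Op2 fields above plus the generic size and architecture bits. A hedged sketch — KVM_REG_ARM64 and KVM_REG_SIZE_U64 are assumed from the generic <linux/kvm.h>, and demo_sysreg_id is illustrative:

#include <stdint.h>
#include <linux/kvm.h>

/* Sketch: build the 64-bit id for a sysreg; e.g. MPIDR_EL1 is
 * op0=3, op1=0, crn=0, crm=0, op2=5.
 */
static uint64_t demo_sysreg_id(uint64_t op0, uint64_t op1, uint64_t crn,
			       uint64_t crm, uint64_t op2)
{
	return KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
	       (op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
	       (op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
	       (crn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
	       (crm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
	       (op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT);
}

The resulting value goes into struct kvm_one_reg's id field, alongside a userspace pointer to a __u64 holding the data.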
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index a2a4d810bea3..49c162c03b69 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c | |||
@@ -104,5 +104,38 @@ int main(void) | |||
104 | BLANK(); | 104 | BLANK(); |
105 | DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); | 105 | DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); |
106 | DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); | 106 | DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); |
107 | BLANK(); | ||
108 | #ifdef CONFIG_KVM_ARM_HOST | ||
109 | DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); | ||
110 | DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); | ||
111 | DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); | ||
112 | DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs)); | ||
113 | DEFINE(CPU_SP_EL1, offsetof(struct kvm_regs, sp_el1)); | ||
114 | DEFINE(CPU_ELR_EL1, offsetof(struct kvm_regs, elr_el1)); | ||
115 | DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr)); | ||
116 | DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs)); | ||
117 | DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2)); | ||
118 | DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2)); | ||
119 | DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2)); | ||
120 | DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); | ||
121 | DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); | ||
122 | DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); | ||
123 | DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); | ||
124 | DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval)); | ||
125 | DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff)); | ||
126 | DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); | ||
127 | DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); | ||
128 | DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); | ||
129 | DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); | ||
130 | DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr)); | ||
131 | DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr)); | ||
132 | DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr)); | ||
133 | DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr)); | ||
134 | DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr)); | ||
135 | DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr)); | ||
136 | DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); | ||
137 | DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); | ||
138 | DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); | ||
139 | #endif | ||
107 | return 0; | 140 | return 0; |
108 | } | 141 | } |
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index f4726dc054b3..08018e3df580 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/ptrace.h> | 25 | #include <linux/ptrace.h> |
26 | #include <linux/stat.h> | 26 | #include <linux/stat.h> |
27 | #include <linux/uaccess.h> | ||
27 | 28 | ||
28 | #include <asm/debug-monitors.h> | 29 | #include <asm/debug-monitors.h> |
29 | #include <asm/local.h> | 30 | #include <asm/local.h> |
@@ -226,13 +227,74 @@ static int single_step_handler(unsigned long addr, unsigned int esr, | |||
226 | return 0; | 227 | return 0; |
227 | } | 228 | } |
228 | 229 | ||
229 | static int __init single_step_init(void) | 230 | static int brk_handler(unsigned long addr, unsigned int esr, |
231 | struct pt_regs *regs) | ||
232 | { | ||
233 | siginfo_t info; | ||
234 | |||
235 | if (!user_mode(regs)) | ||
236 | return -EFAULT; | ||
237 | |||
238 | info = (siginfo_t) { | ||
239 | .si_signo = SIGTRAP, | ||
240 | .si_errno = 0, | ||
241 | .si_code = TRAP_BRKPT, | ||
242 | .si_addr = (void __user *)instruction_pointer(regs), | ||
243 | }; | ||
244 | |||
245 | force_sig_info(SIGTRAP, &info, current); | ||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | int aarch32_break_handler(struct pt_regs *regs) | ||
250 | { | ||
251 | siginfo_t info; | ||
252 | unsigned int instr; | ||
253 | bool bp = false; | ||
254 | void __user *pc = (void __user *)instruction_pointer(regs); | ||
255 | |||
256 | if (!compat_user_mode(regs)) | ||
257 | return -EFAULT; | ||
258 | |||
259 | if (compat_thumb_mode(regs)) { | ||
260 | /* get 16-bit Thumb instruction */ | ||
261 | get_user(instr, (u16 __user *)pc); | ||
262 | if (instr == AARCH32_BREAK_THUMB2_LO) { | ||
263 | /* get second half of 32-bit Thumb-2 instruction */ | ||
264 | get_user(instr, (u16 __user *)(pc + 2)); | ||
265 | bp = instr == AARCH32_BREAK_THUMB2_HI; | ||
266 | } else { | ||
267 | bp = instr == AARCH32_BREAK_THUMB; | ||
268 | } | ||
269 | } else { | ||
270 | /* 32-bit ARM instruction */ | ||
271 | get_user(instr, (u32 __user *)pc); | ||
272 | bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM; | ||
273 | } | ||
274 | |||
275 | if (!bp) | ||
276 | return -EFAULT; | ||
277 | |||
278 | info = (siginfo_t) { | ||
279 | .si_signo = SIGTRAP, | ||
280 | .si_errno = 0, | ||
281 | .si_code = TRAP_BRKPT, | ||
282 | .si_addr = pc, | ||
283 | }; | ||
284 | |||
285 | force_sig_info(SIGTRAP, &info, current); | ||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | static int __init debug_traps_init(void) | ||
230 | { | 290 | { |
231 | hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP, | 291 | hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP, |
232 | TRAP_HWBKPT, "single-step handler"); | 292 | TRAP_HWBKPT, "single-step handler"); |
293 | hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP, | ||
294 | TRAP_BRKPT, "ptrace BRK handler"); | ||
233 | return 0; | 295 | return 0; |
234 | } | 296 | } |
235 | arch_initcall(single_step_init); | 297 | arch_initcall(debug_traps_init); |
236 | 298 | ||
237 | /* Re-enable single step for syscall restarting. */ | 299 | /* Re-enable single step for syscall restarting. */ |
238 | void user_rewind_single_step(struct task_struct *task) | 300 | void user_rewind_single_step(struct task_struct *task) |
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 6e1e77f1831c..fecdbf7de82e 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c | |||
@@ -53,28 +53,6 @@ void ptrace_disable(struct task_struct *child) | |||
53 | { | 53 | { |
54 | } | 54 | } |
55 | 55 | ||
56 | /* | ||
57 | * Handle hitting a breakpoint. | ||
58 | */ | ||
59 | static int ptrace_break(struct pt_regs *regs) | ||
60 | { | ||
61 | siginfo_t info = { | ||
62 | .si_signo = SIGTRAP, | ||
63 | .si_errno = 0, | ||
64 | .si_code = TRAP_BRKPT, | ||
65 | .si_addr = (void __user *)instruction_pointer(regs), | ||
66 | }; | ||
67 | |||
68 | force_sig_info(SIGTRAP, &info, current); | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static int arm64_break_trap(unsigned long addr, unsigned int esr, | ||
73 | struct pt_regs *regs) | ||
74 | { | ||
75 | return ptrace_break(regs); | ||
76 | } | ||
77 | |||
78 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 56 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
79 | /* | 57 | /* |
80 | * Handle hitting a HW-breakpoint. | 58 | * Handle hitting a HW-breakpoint. |
@@ -817,33 +795,6 @@ static const struct user_regset_view user_aarch32_view = { | |||
817 | .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets) | 795 | .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets) |
818 | }; | 796 | }; |
819 | 797 | ||
820 | int aarch32_break_trap(struct pt_regs *regs) | ||
821 | { | ||
822 | unsigned int instr; | ||
823 | bool bp = false; | ||
824 | void __user *pc = (void __user *)instruction_pointer(regs); | ||
825 | |||
826 | if (compat_thumb_mode(regs)) { | ||
827 | /* get 16-bit Thumb instruction */ | ||
828 | get_user(instr, (u16 __user *)pc); | ||
829 | if (instr == AARCH32_BREAK_THUMB2_LO) { | ||
830 | /* get second half of 32-bit Thumb-2 instruction */ | ||
831 | get_user(instr, (u16 __user *)(pc + 2)); | ||
832 | bp = instr == AARCH32_BREAK_THUMB2_HI; | ||
833 | } else { | ||
834 | bp = instr == AARCH32_BREAK_THUMB; | ||
835 | } | ||
836 | } else { | ||
837 | /* 32-bit ARM instruction */ | ||
838 | get_user(instr, (u32 __user *)pc); | ||
839 | bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM; | ||
840 | } | ||
841 | |||
842 | if (bp) | ||
843 | return ptrace_break(regs); | ||
844 | return 1; | ||
845 | } | ||
846 | |||
847 | static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off, | 798 | static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off, |
848 | compat_ulong_t __user *ret) | 799 | compat_ulong_t __user *ret) |
849 | { | 800 | { |
@@ -1111,16 +1062,6 @@ long arch_ptrace(struct task_struct *child, long request, | |||
1111 | return ptrace_request(child, request, addr, data); | 1062 | return ptrace_request(child, request, addr, data); |
1112 | } | 1063 | } |
1113 | 1064 | ||
1114 | |||
1115 | static int __init ptrace_break_init(void) | ||
1116 | { | ||
1117 | hook_debug_fault_code(DBG_ESR_EVT_BRK, arm64_break_trap, SIGTRAP, | ||
1118 | TRAP_BRKPT, "ptrace BRK handler"); | ||
1119 | return 0; | ||
1120 | } | ||
1121 | core_initcall(ptrace_break_init); | ||
1122 | |||
1123 | |||
1124 | asmlinkage int syscall_trace(int dir, struct pt_regs *regs) | 1065 | asmlinkage int syscall_trace(int dir, struct pt_regs *regs) |
1125 | { | 1066 | { |
1126 | unsigned long saved_reg; | 1067 | unsigned long saved_reg; |
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c index a551f88ae2c1..03dc3718eb13 100644 --- a/arch/arm64/kernel/time.c +++ b/arch/arm64/kernel/time.c | |||
@@ -68,12 +68,6 @@ unsigned long long notrace sched_clock(void) | |||
68 | return arch_timer_read_counter() * sched_clock_mult; | 68 | return arch_timer_read_counter() * sched_clock_mult; |
69 | } | 69 | } |
70 | 70 | ||
71 | int read_current_timer(unsigned long *timer_value) | ||
72 | { | ||
73 | *timer_value = arch_timer_read_counter(); | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | void __init time_init(void) | 71 | void __init time_init(void) |
78 | { | 72 | { |
79 | u32 arch_timer_rate; | 73 | u32 arch_timer_rate; |
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index f30852d28590..7ffadddb645d 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/syscalls.h> | 32 | #include <linux/syscalls.h> |
33 | 33 | ||
34 | #include <asm/atomic.h> | 34 | #include <asm/atomic.h> |
35 | #include <asm/debug-monitors.h> | ||
35 | #include <asm/traps.h> | 36 | #include <asm/traps.h> |
36 | #include <asm/stacktrace.h> | 37 | #include <asm/stacktrace.h> |
37 | #include <asm/exception.h> | 38 | #include <asm/exception.h> |
@@ -261,11 +262,9 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) | |||
261 | siginfo_t info; | 262 | siginfo_t info; |
262 | void __user *pc = (void __user *)instruction_pointer(regs); | 263 | void __user *pc = (void __user *)instruction_pointer(regs); |
263 | 264 | ||
264 | #ifdef CONFIG_COMPAT | ||
265 | /* check for AArch32 breakpoint instructions */ | 265 | /* check for AArch32 breakpoint instructions */ |
266 | if (compat_user_mode(regs) && aarch32_break_trap(regs) == 0) | 266 | if (!aarch32_break_handler(regs)) |
267 | return; | 267 | return; |
268 | #endif | ||
269 | 268 | ||
270 | if (show_unhandled_signals && unhandled_signal(current, SIGILL) && | 269 | if (show_unhandled_signals && unhandled_signal(current, SIGILL) && |
271 | printk_ratelimit()) { | 270 | printk_ratelimit()) { |
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 3fae2be8b016..f5e55747242f 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S | |||
@@ -17,6 +17,19 @@ ENTRY(stext) | |||
17 | 17 | ||
18 | jiffies = jiffies_64; | 18 | jiffies = jiffies_64; |
19 | 19 | ||
20 | #define HYPERVISOR_TEXT \ | ||
21 | /* \ | ||
22 | * Force the alignment to be compatible with \ | ||
23 | * the vectors requirements \ | ||
24 | */ \ | ||
25 | . = ALIGN(2048); \ | ||
26 | VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ | ||
27 | *(.hyp.idmap.text) \ | ||
28 | VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \ | ||
29 | VMLINUX_SYMBOL(__hyp_text_start) = .; \ | ||
30 | *(.hyp.text) \ | ||
31 | VMLINUX_SYMBOL(__hyp_text_end) = .; | ||
32 | |||
20 | SECTIONS | 33 | SECTIONS |
21 | { | 34 | { |
22 | /* | 35 | /* |
@@ -49,6 +62,7 @@ SECTIONS | |||
49 | TEXT_TEXT | 62 | TEXT_TEXT |
50 | SCHED_TEXT | 63 | SCHED_TEXT |
51 | LOCK_TEXT | 64 | LOCK_TEXT |
65 | HYPERVISOR_TEXT | ||
52 | *(.fixup) | 66 | *(.fixup) |
53 | *(.gnu.warning) | 67 | *(.gnu.warning) |
54 | . = ALIGN(16); | 68 | . = ALIGN(16); |
@@ -56,7 +70,7 @@ SECTIONS | |||
56 | } | 70 | } |
57 | 71 | ||
58 | RO_DATA(PAGE_SIZE) | 72 | RO_DATA(PAGE_SIZE) |
59 | 73 | EXCEPTION_TABLE(8) | |
60 | _etext = .; /* End of text and rodata section */ | 74 | _etext = .; /* End of text and rodata section */ |
61 | 75 | ||
62 | . = ALIGN(PAGE_SIZE); | 76 | . = ALIGN(PAGE_SIZE); |
@@ -99,14 +113,6 @@ SECTIONS | |||
99 | READ_MOSTLY_DATA(64) | 113 | READ_MOSTLY_DATA(64) |
100 | 114 | ||
101 | /* | 115 | /* |
102 | * The exception fixup table (might need resorting at runtime) | ||
103 | */ | ||
104 | . = ALIGN(32); | ||
105 | __start___ex_table = .; | ||
106 | *(__ex_table) | ||
107 | __stop___ex_table = .; | ||
108 | |||
109 | /* | ||
110 | * and the usual data section | 116 | * and the usual data section |
111 | */ | 117 | */ |
112 | DATA_DATA | 118 | DATA_DATA |
@@ -124,3 +130,9 @@ SECTIONS | |||
124 | STABS_DEBUG | 130 | STABS_DEBUG |
125 | .comment 0 : { *(.comment) } | 131 | .comment 0 : { *(.comment) } |
126 | } | 132 | } |
133 | |||
134 | /* | ||
135 | * The HYP init code can't be more than a page long. | ||
136 | */ | ||
137 | ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end), | ||
138 | "HYP init code too big") | ||
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile new file mode 100644 index 000000000000..72a9fd583ad3 --- /dev/null +++ b/arch/arm64/kvm/Makefile | |||
@@ -0,0 +1,23 @@ | |||
1 | # | ||
2 | # Makefile for Kernel-based Virtual Machine module | ||
3 | # | ||
4 | |||
5 | ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm | ||
6 | CFLAGS_arm.o := -I. | ||
7 | CFLAGS_mmu.o := -I. | ||
8 | |||
9 | KVM=../../../virt/kvm | ||
10 | ARM=../../../arch/arm/kvm | ||
11 | |||
12 | obj-$(CONFIG_KVM_ARM_HOST) += kvm.o | ||
13 | |||
14 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o | ||
15 | kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o | ||
16 | kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o | ||
17 | |||
18 | kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o | ||
19 | kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o | ||
20 | kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o | ||
21 | |||
22 | kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o | ||
23 | kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o | ||
diff --git a/arch/arm64/kvm/emulate.c b/arch/arm64/kvm/emulate.c new file mode 100644 index 000000000000..124418d17049 --- /dev/null +++ b/arch/arm64/kvm/emulate.c | |||
@@ -0,0 +1,158 @@ | |||
1 | /* | ||
2 | * (not much of an) Emulation layer for 32bit guests. | ||
3 | * | ||
4 | * Copyright (C) 2012,2013 - ARM Ltd | ||
5 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
6 | * | ||
7 | * based on arch/arm/kvm/emulate.c | ||
8 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
9 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
10 | * | ||
11 | * This program is free software: you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
22 | */ | ||
23 | |||
24 | #include <linux/kvm_host.h> | ||
25 | #include <asm/kvm_emulate.h> | ||
26 | |||
27 | /* | ||
28 | * stolen from arch/arm/kernel/opcodes.c | ||
29 | * | ||
30 | * condition code lookup table | ||
31 | * index into the table is the condition code: EQ, NE, ... LT, GT, AL, NV | ||
32 | * | ||
33 | * bit position within each 16bit entry is the NZCV flags value | ||
34 | */ | ||
35 | static const unsigned short cc_map[16] = { | ||
36 | 0xF0F0, /* EQ == Z set */ | ||
37 | 0x0F0F, /* NE */ | ||
38 | 0xCCCC, /* CS == C set */ | ||
39 | 0x3333, /* CC */ | ||
40 | 0xFF00, /* MI == N set */ | ||
41 | 0x00FF, /* PL */ | ||
42 | 0xAAAA, /* VS == V set */ | ||
43 | 0x5555, /* VC */ | ||
44 | 0x0C0C, /* HI == C set && Z clear */ | ||
45 | 0xF3F3, /* LS == C clear || Z set */ | ||
46 | 0xAA55, /* GE == (N==V) */ | ||
47 | 0x55AA, /* LT == (N!=V) */ | ||
48 | 0x0A05, /* GT == (!Z && (N==V)) */ | ||
49 | 0xF5FA, /* LE == (Z || (N!=V)) */ | ||
50 | 0xFFFF, /* AL always */ | ||
51 | 0 /* NV */ | ||
52 | }; | ||
53 | |||
54 | static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) | ||
55 | { | ||
56 | u32 esr = kvm_vcpu_get_hsr(vcpu); | ||
57 | |||
58 | if (esr & ESR_EL2_CV) | ||
59 | return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT; | ||
60 | |||
61 | return -1; | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * Check if a trapped instruction should have been executed or not. | ||
66 | */ | ||
67 | bool kvm_condition_valid32(const struct kvm_vcpu *vcpu) | ||
68 | { | ||
69 | unsigned long cpsr; | ||
70 | u32 cpsr_cond; | ||
71 | int cond; | ||
72 | |||
73 | /* Top two bits non-zero? Unconditional. */ | ||
74 | if (kvm_vcpu_get_hsr(vcpu) >> 30) | ||
75 | return true; | ||
76 | |||
77 | /* Is condition field valid? */ | ||
78 | cond = kvm_vcpu_get_condition(vcpu); | ||
79 | if (cond == 0xE) | ||
80 | return true; | ||
81 | |||
82 | cpsr = *vcpu_cpsr(vcpu); | ||
83 | |||
84 | if (cond < 0) { | ||
85 | /* This can happen in Thumb mode: examine IT state. */ | ||
86 | unsigned long it; | ||
87 | |||
88 | it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); | ||
89 | |||
90 | /* it == 0 => unconditional. */ | ||
91 | if (it == 0) | ||
92 | return true; | ||
93 | |||
94 | /* The cond for this insn works out as the top 4 bits. */ | ||
95 | cond = (it >> 4); | ||
96 | } | ||
97 | |||
98 | cpsr_cond = cpsr >> 28; | ||
99 | |||
100 | if (!((cc_map[cond] >> cpsr_cond) & 1)) | ||
101 | return false; | ||
102 | |||
103 | return true; | ||
104 | } | ||
105 | |||
106 | /** | ||
107 | * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in an IT-block | ||
108 | * @vcpu: The VCPU pointer | ||
109 | * | ||
110 | * When exceptions occur while instructions are executed in Thumb IF-THEN | ||
111 | * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have | ||
112 | * to do this little bit of work manually. The fields map like this: | ||
113 | * | ||
114 | * IT[7:0] -> CPSR[26:25],CPSR[15:10] | ||
115 | */ | ||
116 | static void kvm_adjust_itstate(struct kvm_vcpu *vcpu) | ||
117 | { | ||
118 | unsigned long itbits, cond; | ||
119 | unsigned long cpsr = *vcpu_cpsr(vcpu); | ||
120 | bool is_arm = !(cpsr & COMPAT_PSR_T_BIT); | ||
121 | |||
122 | BUG_ON(is_arm && (cpsr & COMPAT_PSR_IT_MASK)); | ||
123 | |||
124 | if (!(cpsr & COMPAT_PSR_IT_MASK)) | ||
125 | return; | ||
126 | |||
127 | cond = (cpsr & 0xe000) >> 13; | ||
128 | itbits = (cpsr & 0x1c00) >> (10 - 2); | ||
129 | itbits |= (cpsr & (0x3 << 25)) >> 25; | ||
130 | |||
131 | /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */ | ||
132 | if ((itbits & 0x7) == 0) | ||
133 | itbits = cond = 0; | ||
134 | else | ||
135 | itbits = (itbits << 1) & 0x1f; | ||
136 | |||
137 | cpsr &= ~COMPAT_PSR_IT_MASK; | ||
138 | cpsr |= cond << 13; | ||
139 | cpsr |= (itbits & 0x1c) << (10 - 2); | ||
140 | cpsr |= (itbits & 0x3) << 25; | ||
141 | *vcpu_cpsr(vcpu) = cpsr; | ||
142 | } | ||
143 | |||
144 | /** | ||
145 | * kvm_skip_instr32 - skip a trapped instruction and proceed to the next | ||
146 | * @vcpu: The vcpu pointer | ||
147 | */ | ||
148 | void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) | ||
149 | { | ||
150 | bool is_thumb; | ||
151 | |||
152 | is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT); | ||
153 | if (is_thumb && !is_wide_instr) | ||
154 | *vcpu_pc(vcpu) += 2; | ||
155 | else | ||
156 | *vcpu_pc(vcpu) += 4; | ||
157 | kvm_adjust_itstate(vcpu); | ||
158 | } | ||
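To see how the cc_map lookup and the ITAdvance step fit together, here is a self-contained sketch (hypothetical, userspace-only; not part of this patch) that packs ITSTATE as cond[7:5]:mask[4:0], the layout kvm_adjust_itstate() reassembles from the CPSR:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same table as cc_map above: bit n of each entry is 1 iff the
 * condition holds when the NZCV flags equal n. */
static const unsigned short cc_map_demo[16] = {
	0xF0F0, 0x0F0F, 0xCCCC, 0x3333, 0xFF00, 0x00FF, 0xAAAA, 0x5555,
	0x0C0C, 0xF3F3, 0xAA55, 0x55AA, 0x0A05, 0xF5FA, 0xFFFF, 0x0000,
};

static bool cond_holds(unsigned cond, unsigned nzcv)
{
	return (cc_map_demo[cond] >> nzcv) & 1;
}

/* ITAdvance on a packed ITSTATE (cond in [7:5], mask in [4:0]),
 * matching the update performed by kvm_adjust_itstate(). */
static uint8_t it_advance(uint8_t it)
{
	if ((it & 0x07) == 0)		/* last slot: block is finished */
		return 0;
	return (it & 0xe0) | ((it << 1) & 0x1f);
}

int main(void)
{
	printf("EQ with Z set:   %d\n", cond_holds(0, 0x4));   /* 1 */
	printf("NE with Z set:   %d\n", cond_holds(1, 0x4));   /* 0 */
	printf("ITSTATE 0x0a -> 0x%02x\n", it_advance(0x0a));  /* 0x14 */
	return 0;
}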
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c new file mode 100644 index 000000000000..2c3ff67a8ecb --- /dev/null +++ b/arch/arm64/kvm/guest.c | |||
@@ -0,0 +1,265 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/kvm/guest.c: | ||
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | |||
22 | #include <linux/errno.h> | ||
23 | #include <linux/err.h> | ||
24 | #include <linux/kvm_host.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/vmalloc.h> | ||
27 | #include <linux/fs.h> | ||
28 | #include <asm/cputype.h> | ||
29 | #include <asm/uaccess.h> | ||
30 | #include <asm/kvm.h> | ||
31 | #include <asm/kvm_asm.h> | ||
32 | #include <asm/kvm_emulate.h> | ||
33 | #include <asm/kvm_coproc.h> | ||
34 | |||
35 | struct kvm_stats_debugfs_item debugfs_entries[] = { | ||
36 | { NULL } | ||
37 | }; | ||
38 | |||
39 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | ||
40 | { | ||
41 | vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; | ||
42 | return 0; | ||
43 | } | ||
44 | |||
45 | static u64 core_reg_offset_from_id(u64 id) | ||
46 | { | ||
47 | return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE); | ||
48 | } | ||
49 | |||
50 | static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
51 | { | ||
52 | /* | ||
53 | * Because the kvm_regs structure is a mix of 32, 64 and | ||
54 | * 128bit fields, we index it as if it was a 32bit | ||
55 | * array. Hence below, nr_regs is the number of entries, and | ||
56 | * off is the index into the "array". | ||
57 | */ | ||
58 | __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr; | ||
59 | struct kvm_regs *regs = vcpu_gp_regs(vcpu); | ||
60 | int nr_regs = sizeof(*regs) / sizeof(__u32); | ||
61 | u32 off; | ||
62 | |||
63 | /* Our ID is an index into the kvm_regs struct. */ | ||
64 | off = core_reg_offset_from_id(reg->id); | ||
65 | if (off >= nr_regs || | ||
66 | (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) | ||
67 | return -ENOENT; | ||
68 | |||
69 | if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id))) | ||
70 | return -EFAULT; | ||
71 | |||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
76 | { | ||
77 | __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr; | ||
78 | struct kvm_regs *regs = vcpu_gp_regs(vcpu); | ||
79 | int nr_regs = sizeof(*regs) / sizeof(__u32); | ||
80 | __uint128_t tmp; | ||
81 | void *valp = &tmp; | ||
82 | u64 off; | ||
83 | int err = 0; | ||
84 | |||
85 | /* Our ID is an index into the kvm_regs struct. */ | ||
86 | off = core_reg_offset_from_id(reg->id); | ||
87 | if (off >= nr_regs || | ||
88 | (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) | ||
89 | return -ENOENT; | ||
90 | |||
91 | if (KVM_REG_SIZE(reg->id) > sizeof(tmp)) | ||
92 | return -EINVAL; | ||
93 | |||
94 | if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) { | ||
95 | err = -EFAULT; | ||
96 | goto out; | ||
97 | } | ||
98 | |||
99 | if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) { | ||
100 | u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK; | ||
101 | switch (mode) { | ||
102 | case COMPAT_PSR_MODE_USR: | ||
103 | case COMPAT_PSR_MODE_FIQ: | ||
104 | case COMPAT_PSR_MODE_IRQ: | ||
105 | case COMPAT_PSR_MODE_SVC: | ||
106 | case COMPAT_PSR_MODE_ABT: | ||
107 | case COMPAT_PSR_MODE_UND: | ||
108 | case PSR_MODE_EL0t: | ||
109 | case PSR_MODE_EL1t: | ||
110 | case PSR_MODE_EL1h: | ||
111 | break; | ||
112 | default: | ||
113 | err = -EINVAL; | ||
114 | goto out; | ||
115 | } | ||
116 | } | ||
117 | |||
118 | memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id)); | ||
119 | out: | ||
120 | return err; | ||
121 | } | ||
122 | |||
123 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
124 | { | ||
125 | return -EINVAL; | ||
126 | } | ||
127 | |||
128 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
129 | { | ||
130 | return -EINVAL; | ||
131 | } | ||
132 | |||
133 | static unsigned long num_core_regs(void) | ||
134 | { | ||
135 | return sizeof(struct kvm_regs) / sizeof(__u32); | ||
136 | } | ||
137 | |||
138 | /** | ||
139 | * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG | ||
140 | * | ||
141 | * This is for all registers. | ||
142 | */ | ||
143 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) | ||
144 | { | ||
145 | return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu); | ||
146 | } | ||
147 | |||
148 | /** | ||
149 | * kvm_arm_copy_reg_indices - get indices of all registers. | ||
150 | * | ||
151 | * We do core registers right here, then we append system regs. | ||
152 | */ | ||
153 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | ||
154 | { | ||
155 | unsigned int i; | ||
156 | const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE; | ||
157 | |||
158 | for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) { | ||
159 | if (put_user(core_reg | i, uindices)) | ||
160 | return -EFAULT; | ||
161 | uindices++; | ||
162 | } | ||
163 | |||
164 | return kvm_arm_copy_sys_reg_indices(vcpu, uindices); | ||
165 | } | ||
166 | |||
167 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
168 | { | ||
169 | /* We currently use nothing arch-specific in upper 32 bits */ | ||
170 | if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32) | ||
171 | return -EINVAL; | ||
172 | |||
173 | /* Register group 16 means we want a core register. */ | ||
174 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) | ||
175 | return get_core_reg(vcpu, reg); | ||
176 | |||
177 | return kvm_arm_sys_reg_get_reg(vcpu, reg); | ||
178 | } | ||
179 | |||
180 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
181 | { | ||
182 | /* We currently use nothing arch-specific in upper 32 bits */ | ||
183 | if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32) | ||
184 | return -EINVAL; | ||
185 | |||
186 | /* Register group 16 means we set a core register. */ | ||
187 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) | ||
188 | return set_core_reg(vcpu, reg); | ||
189 | |||
190 | return kvm_arm_sys_reg_set_reg(vcpu, reg); | ||
191 | } | ||
192 | |||
193 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | ||
194 | struct kvm_sregs *sregs) | ||
195 | { | ||
196 | return -EINVAL; | ||
197 | } | ||
198 | |||
199 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | ||
200 | struct kvm_sregs *sregs) | ||
201 | { | ||
202 | return -EINVAL; | ||
203 | } | ||
204 | |||
205 | int __attribute_const__ kvm_target_cpu(void) | ||
206 | { | ||
207 | unsigned long implementor = read_cpuid_implementor(); | ||
208 | unsigned long part_number = read_cpuid_part_number(); | ||
209 | |||
210 | if (implementor != ARM_CPU_IMP_ARM) | ||
211 | return -EINVAL; | ||
212 | |||
213 | switch (part_number) { | ||
214 | case ARM_CPU_PART_AEM_V8: | ||
215 | return KVM_ARM_TARGET_AEM_V8; | ||
216 | case ARM_CPU_PART_FOUNDATION: | ||
217 | return KVM_ARM_TARGET_FOUNDATION_V8; | ||
218 | case ARM_CPU_PART_CORTEX_A57: | ||
219 | /* Currently handled by the generic backend */ | ||
220 | return KVM_ARM_TARGET_CORTEX_A57; | ||
221 | default: | ||
222 | return -EINVAL; | ||
223 | } | ||
224 | } | ||
225 | |||
226 | int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, | ||
227 | const struct kvm_vcpu_init *init) | ||
228 | { | ||
229 | unsigned int i; | ||
230 | int phys_target = kvm_target_cpu(); | ||
231 | |||
232 | if (init->target != phys_target) | ||
233 | return -EINVAL; | ||
234 | |||
235 | vcpu->arch.target = phys_target; | ||
236 | bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); | ||
237 | |||
238 | /* -ENOENT for unknown features, -EINVAL for invalid combinations. */ | ||
239 | for (i = 0; i < sizeof(init->features) * 8; i++) { | ||
240 | if (init->features[i / 32] & (1 << (i % 32))) { | ||
241 | if (i >= KVM_VCPU_MAX_FEATURES) | ||
242 | return -ENOENT; | ||
243 | set_bit(i, vcpu->arch.features); | ||
244 | } | ||
245 | } | ||
246 | |||
247 | /* Now we know what it is, we can reset it. */ | ||
248 | return kvm_reset_vcpu(vcpu); | ||
249 | } | ||
250 | |||
251 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | ||
252 | { | ||
253 | return -EINVAL; | ||
254 | } | ||
255 | |||
256 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | ||
257 | { | ||
258 | return -EINVAL; | ||
259 | } | ||
260 | |||
261 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | ||
262 | struct kvm_translation *tr) | ||
263 | { | ||
264 | return -EINVAL; | ||
265 | } | ||
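The 32-bit-unit indexing that get_core_reg()/set_core_reg() implement is driven entirely by the register id that userspace passes in. A hypothetical userspace sketch, assuming the uapi macros from this series (KVM_REG_ARM64, KVM_REG_ARM_CORE and KVM_REG_ARM_CORE_REG()) are available via <linux/kvm.h>:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the guest PC: ids index struct kvm_regs in 32-bit units,
 * exactly as core_reg_offset_from_id() expects on the kernel side. */
static int read_guest_pc(int vcpu_fd, uint64_t *pc)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
		      KVM_REG_ARM_CORE_REG(regs.pc),
		.addr = (uint64_t)(uintptr_t)pc,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}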
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c new file mode 100644 index 000000000000..9beaca033437 --- /dev/null +++ b/arch/arm64/kvm/handle_exit.c | |||
@@ -0,0 +1,124 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/kvm/handle_exit.c: | ||
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | |||
22 | #include <linux/kvm.h> | ||
23 | #include <linux/kvm_host.h> | ||
24 | #include <asm/kvm_emulate.h> | ||
25 | #include <asm/kvm_coproc.h> | ||
26 | #include <asm/kvm_mmu.h> | ||
27 | #include <asm/kvm_psci.h> | ||
28 | |||
29 | typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); | ||
30 | |||
31 | static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
32 | { | ||
33 | if (kvm_psci_call(vcpu)) | ||
34 | return 1; | ||
35 | |||
36 | kvm_inject_undefined(vcpu); | ||
37 | return 1; | ||
38 | } | ||
39 | |||
40 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
41 | { | ||
42 | if (kvm_psci_call(vcpu)) | ||
43 | return 1; | ||
44 | |||
45 | kvm_inject_undefined(vcpu); | ||
46 | return 1; | ||
47 | } | ||
48 | |||
49 | /** | ||
50 | * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest | ||
51 | * @vcpu: the vcpu pointer | ||
52 | * | ||
53 | * Simply call kvm_vcpu_block(), which will halt execution of | ||
54 | * world-switches and schedule other host processes until there is an | ||
55 | * incoming IRQ or FIQ to the VM. | ||
56 | */ | ||
57 | static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
58 | { | ||
59 | kvm_vcpu_block(vcpu); | ||
60 | return 1; | ||
61 | } | ||
62 | |||
63 | static exit_handle_fn arm_exit_handlers[] = { | ||
64 | [ESR_EL2_EC_WFI] = kvm_handle_wfi, | ||
65 | [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32, | ||
66 | [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64, | ||
67 | [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access, | ||
68 | [ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store, | ||
69 | [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_access, | ||
70 | [ESR_EL2_EC_HVC32] = handle_hvc, | ||
71 | [ESR_EL2_EC_SMC32] = handle_smc, | ||
72 | [ESR_EL2_EC_HVC64] = handle_hvc, | ||
73 | [ESR_EL2_EC_SMC64] = handle_smc, | ||
74 | [ESR_EL2_EC_SYS64] = kvm_handle_sys_reg, | ||
75 | [ESR_EL2_EC_IABT] = kvm_handle_guest_abort, | ||
76 | [ESR_EL2_EC_DABT] = kvm_handle_guest_abort, | ||
77 | }; | ||
78 | |||
79 | static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) | ||
80 | { | ||
81 | u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); | ||
82 | |||
83 | if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) || | ||
84 | !arm_exit_handlers[hsr_ec]) { | ||
85 | kvm_err("Unkown exception class: hsr: %#08x\n", | ||
86 | (unsigned int)kvm_vcpu_get_hsr(vcpu)); | ||
87 | BUG(); | ||
88 | } | ||
89 | |||
90 | return arm_exit_handlers[hsr_ec]; | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on | ||
95 | * proper exit to userspace. | ||
96 | */ | ||
97 | int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
98 | int exception_index) | ||
99 | { | ||
100 | exit_handle_fn exit_handler; | ||
101 | |||
102 | switch (exception_index) { | ||
103 | case ARM_EXCEPTION_IRQ: | ||
104 | return 1; | ||
105 | case ARM_EXCEPTION_TRAP: | ||
106 | /* | ||
107 | * See ARM ARM B1.14.1: "Hyp traps on instructions | ||
108 | * that fail their condition code check" | ||
109 | */ | ||
110 | if (!kvm_condition_valid(vcpu)) { | ||
111 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | ||
112 | return 1; | ||
113 | } | ||
114 | |||
115 | exit_handler = kvm_get_exit_handler(vcpu); | ||
116 | |||
117 | return exit_handler(vcpu, run); | ||
118 | default: | ||
119 | kvm_pr_unimpl("Unsupported exception type: %d", | ||
120 | exception_index); | ||
121 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
122 | return 0; | ||
123 | } | ||
124 | } | ||
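handle_exit() is consumed by the shared run loop in arch/arm/kvm/arm.c; the sketch below is illustrative only (the real loop also handles signals, vgic/timer flushing and mmio completion) and shows how the return-value contract drives guest re-entry:

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>

static int run_vcpu_sketch(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;

	while (ret > 0) {
		/* world switch; returns an ARM_EXCEPTION_* index */
		int exception_index = kvm_call_hyp(__kvm_vcpu_run, vcpu);

		ret = handle_exit(vcpu, run, exception_index);
	}

	return ret;	/* 0: run->exit_reason is valid; < 0: error */
}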
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S new file mode 100644 index 000000000000..ba84e6705e20 --- /dev/null +++ b/arch/arm64/kvm/hyp-init.S | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #include <linux/linkage.h> | ||
19 | |||
20 | #include <asm/assembler.h> | ||
21 | #include <asm/kvm_arm.h> | ||
22 | #include <asm/kvm_mmu.h> | ||
23 | |||
24 | .text | ||
25 | .pushsection .hyp.idmap.text, "ax" | ||
26 | |||
27 | .align 11 | ||
28 | |||
29 | ENTRY(__kvm_hyp_init) | ||
30 | ventry __invalid // Synchronous EL2t | ||
31 | ventry __invalid // IRQ EL2t | ||
32 | ventry __invalid // FIQ EL2t | ||
33 | ventry __invalid // Error EL2t | ||
34 | |||
35 | ventry __invalid // Synchronous EL2h | ||
36 | ventry __invalid // IRQ EL2h | ||
37 | ventry __invalid // FIQ EL2h | ||
38 | ventry __invalid // Error EL2h | ||
39 | |||
40 | ventry __do_hyp_init // Synchronous 64-bit EL1 | ||
41 | ventry __invalid // IRQ 64-bit EL1 | ||
42 | ventry __invalid // FIQ 64-bit EL1 | ||
43 | ventry __invalid // Error 64-bit EL1 | ||
44 | |||
45 | ventry __invalid // Synchronous 32-bit EL1 | ||
46 | ventry __invalid // IRQ 32-bit EL1 | ||
47 | ventry __invalid // FIQ 32-bit EL1 | ||
48 | ventry __invalid // Error 32-bit EL1 | ||
49 | |||
50 | __invalid: | ||
51 | b . | ||
52 | |||
53 | /* | ||
54 | * x0: HYP boot pgd | ||
55 | * x1: HYP pgd | ||
56 | * x2: HYP stack | ||
57 | * x3: HYP vectors | ||
58 | */ | ||
59 | __do_hyp_init: | ||
60 | |||
61 | msr ttbr0_el2, x0 | ||
62 | |||
63 | mrs x4, tcr_el1 | ||
64 | ldr x5, =TCR_EL2_MASK | ||
65 | and x4, x4, x5 | ||
66 | ldr x5, =TCR_EL2_FLAGS | ||
67 | orr x4, x4, x5 | ||
68 | msr tcr_el2, x4 | ||
69 | |||
70 | ldr x4, =VTCR_EL2_FLAGS | ||
71 | msr vtcr_el2, x4 | ||
72 | |||
73 | mrs x4, mair_el1 | ||
74 | msr mair_el2, x4 | ||
75 | isb | ||
76 | |||
77 | mov x4, #SCTLR_EL2_FLAGS | ||
78 | msr sctlr_el2, x4 | ||
79 | isb | ||
80 | |||
81 | /* MMU is now enabled. Get ready for the trampoline dance */ | ||
82 | ldr x4, =TRAMPOLINE_VA | ||
83 | adr x5, target | ||
84 | bfi x4, x5, #0, #PAGE_SHIFT | ||
85 | br x4 | ||
86 | |||
87 | target: /* We're now in the trampoline code, switch page tables */ | ||
88 | msr ttbr0_el2, x1 | ||
89 | isb | ||
90 | |||
91 | /* Invalidate the old TLBs */ | ||
92 | tlbi alle2 | ||
93 | dsb sy | ||
94 | |||
95 | /* Set the stack and new vectors */ | ||
96 | kern_hyp_va x2 | ||
97 | mov sp, x2 | ||
98 | kern_hyp_va x3 | ||
99 | msr vbar_el2, x3 | ||
100 | |||
101 | /* Hello, World! */ | ||
102 | eret | ||
103 | ENDPROC(__kvm_hyp_init) | ||
104 | |||
105 | .ltorg | ||
106 | |||
107 | .popsection | ||
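The "trampoline dance" above relies on a single bfi to splice the in-page offset of `target` into TRAMPOLINE_VA. A C rendering of that address computation (illustrative only; trampoline_target() is a hypothetical helper):

/* bfi x4, x5, #0, #PAGE_SHIFT: keep the high bits of TRAMPOLINE_VA,
 * take the in-page offset of `target` from its idmap address. */
static unsigned long trampoline_target(unsigned long trampoline_va,
				       unsigned long target_idmap_addr,
				       unsigned int page_shift)
{
	unsigned long mask = (1UL << page_shift) - 1;

	return (trampoline_va & ~mask) | (target_idmap_addr & mask);
}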
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S new file mode 100644 index 000000000000..ff985e3d8b72 --- /dev/null +++ b/arch/arm64/kvm/hyp.S | |||
@@ -0,0 +1,831 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #include <linux/linkage.h> | ||
19 | #include <linux/irqchip/arm-gic.h> | ||
20 | |||
21 | #include <asm/assembler.h> | ||
22 | #include <asm/memory.h> | ||
23 | #include <asm/asm-offsets.h> | ||
24 | #include <asm/fpsimdmacros.h> | ||
25 | #include <asm/kvm.h> | ||
26 | #include <asm/kvm_asm.h> | ||
27 | #include <asm/kvm_arm.h> | ||
28 | #include <asm/kvm_mmu.h> | ||
29 | |||
30 | #define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) | ||
31 | #define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) | ||
32 | #define CPU_SPSR_OFFSET(x) CPU_GP_REG_OFFSET(CPU_SPSR + 8*x) | ||
33 | #define CPU_SYSREG_OFFSET(x) (CPU_SYSREGS + 8*x) | ||
34 | |||
35 | .text | ||
36 | .pushsection .hyp.text, "ax" | ||
37 | .align PAGE_SHIFT | ||
38 | |||
39 | __kvm_hyp_code_start: | ||
40 | .globl __kvm_hyp_code_start | ||
41 | |||
42 | .macro save_common_regs | ||
43 | // x2: base address for cpu context | ||
44 | // x3: tmp register | ||
45 | |||
46 | add x3, x2, #CPU_XREG_OFFSET(19) | ||
47 | stp x19, x20, [x3] | ||
48 | stp x21, x22, [x3, #16] | ||
49 | stp x23, x24, [x3, #32] | ||
50 | stp x25, x26, [x3, #48] | ||
51 | stp x27, x28, [x3, #64] | ||
52 | stp x29, lr, [x3, #80] | ||
53 | |||
54 | mrs x19, sp_el0 | ||
55 | mrs x20, elr_el2 // EL1 PC | ||
56 | mrs x21, spsr_el2 // EL1 pstate | ||
57 | |||
58 | stp x19, x20, [x3, #96] | ||
59 | str x21, [x3, #112] | ||
60 | |||
61 | mrs x22, sp_el1 | ||
62 | mrs x23, elr_el1 | ||
63 | mrs x24, spsr_el1 | ||
64 | |||
65 | str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)] | ||
66 | str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)] | ||
67 | str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)] | ||
68 | .endm | ||
69 | |||
70 | .macro restore_common_regs | ||
71 | // x2: base address for cpu context | ||
72 | // x3: tmp register | ||
73 | |||
74 | ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)] | ||
75 | ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)] | ||
76 | ldr x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)] | ||
77 | |||
78 | msr sp_el1, x22 | ||
79 | msr elr_el1, x23 | ||
80 | msr spsr_el1, x24 | ||
81 | |||
82 | add x3, x2, #CPU_XREG_OFFSET(31) // SP_EL0 | ||
83 | ldp x19, x20, [x3] | ||
84 | ldr x21, [x3, #16] | ||
85 | |||
86 | msr sp_el0, x19 | ||
87 | msr elr_el2, x20 // EL1 PC | ||
88 | msr spsr_el2, x21 // EL1 pstate | ||
89 | |||
90 | add x3, x2, #CPU_XREG_OFFSET(19) | ||
91 | ldp x19, x20, [x3] | ||
92 | ldp x21, x22, [x3, #16] | ||
93 | ldp x23, x24, [x3, #32] | ||
94 | ldp x25, x26, [x3, #48] | ||
95 | ldp x27, x28, [x3, #64] | ||
96 | ldp x29, lr, [x3, #80] | ||
97 | .endm | ||
98 | |||
99 | .macro save_host_regs | ||
100 | save_common_regs | ||
101 | .endm | ||
102 | |||
103 | .macro restore_host_regs | ||
104 | restore_common_regs | ||
105 | .endm | ||
106 | |||
107 | .macro save_fpsimd | ||
108 | // x2: cpu context address | ||
109 | // x3, x4: tmp regs | ||
110 | add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS) | ||
111 | fpsimd_save x3, 4 | ||
112 | .endm | ||
113 | |||
114 | .macro restore_fpsimd | ||
115 | // x2: cpu context address | ||
116 | // x3, x4: tmp regs | ||
117 | add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS) | ||
118 | fpsimd_restore x3, 4 | ||
119 | .endm | ||
120 | |||
121 | .macro save_guest_regs | ||
122 | // x0 is the vcpu address | ||
123 | // x1 is the return code, do not corrupt! | ||
124 | // x2 is the cpu context | ||
125 | // x3 is a tmp register | ||
126 | // Guest's x0-x3 are on the stack | ||
127 | |||
128 | // Compute base to save registers | ||
129 | add x3, x2, #CPU_XREG_OFFSET(4) | ||
130 | stp x4, x5, [x3] | ||
131 | stp x6, x7, [x3, #16] | ||
132 | stp x8, x9, [x3, #32] | ||
133 | stp x10, x11, [x3, #48] | ||
134 | stp x12, x13, [x3, #64] | ||
135 | stp x14, x15, [x3, #80] | ||
136 | stp x16, x17, [x3, #96] | ||
137 | str x18, [x3, #112] | ||
138 | |||
139 | pop x6, x7 // x2, x3 | ||
140 | pop x4, x5 // x0, x1 | ||
141 | |||
142 | add x3, x2, #CPU_XREG_OFFSET(0) | ||
143 | stp x4, x5, [x3] | ||
144 | stp x6, x7, [x3, #16] | ||
145 | |||
146 | save_common_regs | ||
147 | .endm | ||
148 | |||
149 | .macro restore_guest_regs | ||
150 | // x0 is the vcpu address. | ||
151 | // x2 is the cpu context | ||
152 | // x3 is a tmp register | ||
153 | |||
154 | // Prepare x0-x3 for later restore | ||
155 | add x3, x2, #CPU_XREG_OFFSET(0) | ||
156 | ldp x4, x5, [x3] | ||
157 | ldp x6, x7, [x3, #16] | ||
158 | push x4, x5 // Push x0-x3 on the stack | ||
159 | push x6, x7 | ||
160 | |||
161 | // x4-x18 | ||
162 | ldp x4, x5, [x3, #32] | ||
163 | ldp x6, x7, [x3, #48] | ||
164 | ldp x8, x9, [x3, #64] | ||
165 | ldp x10, x11, [x3, #80] | ||
166 | ldp x12, x13, [x3, #96] | ||
167 | ldp x14, x15, [x3, #112] | ||
168 | ldp x16, x17, [x3, #128] | ||
169 | ldr x18, [x3, #144] | ||
170 | |||
171 | // x19-x29, lr, sp*, elr*, spsr* | ||
172 | restore_common_regs | ||
173 | |||
174 | // Last bits of the 64bit state | ||
175 | pop x2, x3 | ||
176 | pop x0, x1 | ||
177 | |||
178 | // Do not touch any register after this! | ||
179 | .endm | ||
180 | |||
181 | /* | ||
182 | * Macros to perform system register save/restore. | ||
183 | * | ||
184 | * Ordering here is absolutely critical, and must be kept consistent | ||
185 | * in {save,restore}_sysregs, {save,restore}_guest_32bit_state, | ||
186 | * and in kvm_asm.h. | ||
187 | * | ||
188 | * In other words, don't touch any of these unless you know what | ||
189 | * you are doing. | ||
190 | */ | ||
191 | .macro save_sysregs | ||
192 | // x2: base address for cpu context | ||
193 | // x3: tmp register | ||
194 | |||
195 | add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1) | ||
196 | |||
197 | mrs x4, vmpidr_el2 | ||
198 | mrs x5, csselr_el1 | ||
199 | mrs x6, sctlr_el1 | ||
200 | mrs x7, actlr_el1 | ||
201 | mrs x8, cpacr_el1 | ||
202 | mrs x9, ttbr0_el1 | ||
203 | mrs x10, ttbr1_el1 | ||
204 | mrs x11, tcr_el1 | ||
205 | mrs x12, esr_el1 | ||
206 | mrs x13, afsr0_el1 | ||
207 | mrs x14, afsr1_el1 | ||
208 | mrs x15, far_el1 | ||
209 | mrs x16, mair_el1 | ||
210 | mrs x17, vbar_el1 | ||
211 | mrs x18, contextidr_el1 | ||
212 | mrs x19, tpidr_el0 | ||
213 | mrs x20, tpidrro_el0 | ||
214 | mrs x21, tpidr_el1 | ||
215 | mrs x22, amair_el1 | ||
216 | mrs x23, cntkctl_el1 | ||
217 | |||
218 | stp x4, x5, [x3] | ||
219 | stp x6, x7, [x3, #16] | ||
220 | stp x8, x9, [x3, #32] | ||
221 | stp x10, x11, [x3, #48] | ||
222 | stp x12, x13, [x3, #64] | ||
223 | stp x14, x15, [x3, #80] | ||
224 | stp x16, x17, [x3, #96] | ||
225 | stp x18, x19, [x3, #112] | ||
226 | stp x20, x21, [x3, #128] | ||
227 | stp x22, x23, [x3, #144] | ||
228 | .endm | ||
229 | |||
230 | .macro restore_sysregs | ||
231 | // x2: base address for cpu context | ||
232 | // x3: tmp register | ||
233 | |||
234 | add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1) | ||
235 | |||
236 | ldp x4, x5, [x3] | ||
237 | ldp x6, x7, [x3, #16] | ||
238 | ldp x8, x9, [x3, #32] | ||
239 | ldp x10, x11, [x3, #48] | ||
240 | ldp x12, x13, [x3, #64] | ||
241 | ldp x14, x15, [x3, #80] | ||
242 | ldp x16, x17, [x3, #96] | ||
243 | ldp x18, x19, [x3, #112] | ||
244 | ldp x20, x21, [x3, #128] | ||
245 | ldp x22, x23, [x3, #144] | ||
246 | |||
247 | msr vmpidr_el2, x4 | ||
248 | msr csselr_el1, x5 | ||
249 | msr sctlr_el1, x6 | ||
250 | msr actlr_el1, x7 | ||
251 | msr cpacr_el1, x8 | ||
252 | msr ttbr0_el1, x9 | ||
253 | msr ttbr1_el1, x10 | ||
254 | msr tcr_el1, x11 | ||
255 | msr esr_el1, x12 | ||
256 | msr afsr0_el1, x13 | ||
257 | msr afsr1_el1, x14 | ||
258 | msr far_el1, x15 | ||
259 | msr mair_el1, x16 | ||
260 | msr vbar_el1, x17 | ||
261 | msr contextidr_el1, x18 | ||
262 | msr tpidr_el0, x19 | ||
263 | msr tpidrro_el0, x20 | ||
264 | msr tpidr_el1, x21 | ||
265 | msr amair_el1, x22 | ||
266 | msr cntkctl_el1, x23 | ||
267 | .endm | ||
268 | |||
269 | .macro skip_32bit_state tmp, target | ||
270 | // Skip 32bit state if not needed | ||
271 | mrs \tmp, hcr_el2 | ||
272 | tbnz \tmp, #HCR_RW_SHIFT, \target | ||
273 | .endm | ||
274 | |||
275 | .macro skip_tee_state tmp, target | ||
276 | // Skip ThumbEE state if not needed | ||
277 | mrs \tmp, id_pfr0_el1 | ||
278 | tbz \tmp, #12, \target | ||
279 | .endm | ||
280 | |||
281 | .macro save_guest_32bit_state | ||
282 | skip_32bit_state x3, 1f | ||
283 | |||
284 | add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT) | ||
285 | mrs x4, spsr_abt | ||
286 | mrs x5, spsr_und | ||
287 | mrs x6, spsr_irq | ||
288 | mrs x7, spsr_fiq | ||
289 | stp x4, x5, [x3] | ||
290 | stp x6, x7, [x3, #16] | ||
291 | |||
292 | add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2) | ||
293 | mrs x4, dacr32_el2 | ||
294 | mrs x5, ifsr32_el2 | ||
295 | mrs x6, fpexc32_el2 | ||
296 | mrs x7, dbgvcr32_el2 | ||
297 | stp x4, x5, [x3] | ||
298 | stp x6, x7, [x3, #16] | ||
299 | |||
300 | skip_tee_state x8, 1f | ||
301 | |||
302 | add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1) | ||
303 | mrs x4, teecr32_el1 | ||
304 | mrs x5, teehbr32_el1 | ||
305 | stp x4, x5, [x3] | ||
306 | 1: | ||
307 | .endm | ||
308 | |||
309 | .macro restore_guest_32bit_state | ||
310 | skip_32bit_state x3, 1f | ||
311 | |||
312 | add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT) | ||
313 | ldp x4, x5, [x3] | ||
314 | ldp x6, x7, [x3, #16] | ||
315 | msr spsr_abt, x4 | ||
316 | msr spsr_und, x5 | ||
317 | msr spsr_irq, x6 | ||
318 | msr spsr_fiq, x7 | ||
319 | |||
320 | add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2) | ||
321 | ldp x4, x5, [x3] | ||
322 | ldp x6, x7, [x3, #16] | ||
323 | msr dacr32_el2, x4 | ||
324 | msr ifsr32_el2, x5 | ||
325 | msr fpexc32_el2, x6 | ||
326 | msr dbgvcr32_el2, x7 | ||
327 | |||
328 | skip_tee_state x8, 1f | ||
329 | |||
330 | add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1) | ||
331 | ldp x4, x5, [x3] | ||
332 | msr teecr32_el1, x4 | ||
333 | msr teehbr32_el1, x5 | ||
334 | 1: | ||
335 | .endm | ||
336 | |||
337 | .macro activate_traps | ||
338 | ldr x2, [x0, #VCPU_IRQ_LINES] | ||
339 | ldr x1, [x0, #VCPU_HCR_EL2] | ||
340 | orr x2, x2, x1 | ||
341 | msr hcr_el2, x2 | ||
342 | |||
343 | ldr x2, =(CPTR_EL2_TTA) | ||
344 | msr cptr_el2, x2 | ||
345 | |||
346 | ldr x2, =(1 << 15) // Trap CP15 Cr=15 | ||
347 | msr hstr_el2, x2 | ||
348 | |||
349 | mrs x2, mdcr_el2 | ||
350 | and x2, x2, #MDCR_EL2_HPMN_MASK | ||
351 | orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR) | ||
352 | msr mdcr_el2, x2 | ||
353 | .endm | ||
354 | |||
355 | .macro deactivate_traps | ||
356 | mov x2, #HCR_RW | ||
357 | msr hcr_el2, x2 | ||
358 | msr cptr_el2, xzr | ||
359 | msr hstr_el2, xzr | ||
360 | |||
361 | mrs x2, mdcr_el2 | ||
362 | and x2, x2, #MDCR_EL2_HPMN_MASK | ||
363 | msr mdcr_el2, x2 | ||
364 | .endm | ||
365 | |||
366 | .macro activate_vm | ||
367 | ldr x1, [x0, #VCPU_KVM] | ||
368 | kern_hyp_va x1 | ||
369 | ldr x2, [x1, #KVM_VTTBR] | ||
370 | msr vttbr_el2, x2 | ||
371 | .endm | ||
372 | |||
373 | .macro deactivate_vm | ||
374 | msr vttbr_el2, xzr | ||
375 | .endm | ||
376 | |||
377 | /* | ||
378 | * Save the VGIC CPU state into memory | ||
379 | * x0: Register pointing to VCPU struct | ||
380 | * Do not corrupt x1!!! | ||
381 | */ | ||
382 | .macro save_vgic_state | ||
383 | /* Get VGIC VCTRL base into x2 */ | ||
384 | ldr x2, [x0, #VCPU_KVM] | ||
385 | kern_hyp_va x2 | ||
386 | ldr x2, [x2, #KVM_VGIC_VCTRL] | ||
387 | kern_hyp_va x2 | ||
388 | cbz x2, 2f // disabled | ||
389 | |||
390 | /* Compute the address of struct vgic_cpu */ | ||
391 | add x3, x0, #VCPU_VGIC_CPU | ||
392 | |||
393 | /* Save all interesting registers */ | ||
394 | ldr w4, [x2, #GICH_HCR] | ||
395 | ldr w5, [x2, #GICH_VMCR] | ||
396 | ldr w6, [x2, #GICH_MISR] | ||
397 | ldr w7, [x2, #GICH_EISR0] | ||
398 | ldr w8, [x2, #GICH_EISR1] | ||
399 | ldr w9, [x2, #GICH_ELRSR0] | ||
400 | ldr w10, [x2, #GICH_ELRSR1] | ||
401 | ldr w11, [x2, #GICH_APR] | ||
402 | |||
403 | str w4, [x3, #VGIC_CPU_HCR] | ||
404 | str w5, [x3, #VGIC_CPU_VMCR] | ||
405 | str w6, [x3, #VGIC_CPU_MISR] | ||
406 | str w7, [x3, #VGIC_CPU_EISR] | ||
407 | str w8, [x3, #(VGIC_CPU_EISR + 4)] | ||
408 | str w9, [x3, #VGIC_CPU_ELRSR] | ||
409 | str w10, [x3, #(VGIC_CPU_ELRSR + 4)] | ||
410 | str w11, [x3, #VGIC_CPU_APR] | ||
411 | |||
412 | /* Clear GICH_HCR */ | ||
413 | str wzr, [x2, #GICH_HCR] | ||
414 | |||
415 | /* Save list registers */ | ||
416 | add x2, x2, #GICH_LR0 | ||
417 | ldr w4, [x3, #VGIC_CPU_NR_LR] | ||
418 | add x3, x3, #VGIC_CPU_LR | ||
419 | 1: ldr w5, [x2], #4 | ||
420 | str w5, [x3], #4 | ||
421 | sub w4, w4, #1 | ||
422 | cbnz w4, 1b | ||
423 | 2: | ||
424 | .endm | ||
425 | |||
426 | /* | ||
427 | * Restore the VGIC CPU state from memory | ||
428 | * x0: Register pointing to VCPU struct | ||
429 | */ | ||
430 | .macro restore_vgic_state | ||
431 | /* Get VGIC VCTRL base into x2 */ | ||
432 | ldr x2, [x0, #VCPU_KVM] | ||
433 | kern_hyp_va x2 | ||
434 | ldr x2, [x2, #KVM_VGIC_VCTRL] | ||
435 | kern_hyp_va x2 | ||
436 | cbz x2, 2f // disabled | ||
437 | |||
438 | /* Compute the address of struct vgic_cpu */ | ||
439 | add x3, x0, #VCPU_VGIC_CPU | ||
440 | |||
441 | /* We only restore a minimal set of registers */ | ||
442 | ldr w4, [x3, #VGIC_CPU_HCR] | ||
443 | ldr w5, [x3, #VGIC_CPU_VMCR] | ||
444 | ldr w6, [x3, #VGIC_CPU_APR] | ||
445 | |||
446 | str w4, [x2, #GICH_HCR] | ||
447 | str w5, [x2, #GICH_VMCR] | ||
448 | str w6, [x2, #GICH_APR] | ||
449 | |||
450 | /* Restore list registers */ | ||
451 | add x2, x2, #GICH_LR0 | ||
452 | ldr w4, [x3, #VGIC_CPU_NR_LR] | ||
453 | add x3, x3, #VGIC_CPU_LR | ||
454 | 1: ldr w5, [x3], #4 | ||
455 | str w5, [x2], #4 | ||
456 | sub w4, w4, #1 | ||
457 | cbnz w4, 1b | ||
458 | 2: | ||
459 | .endm | ||
460 | |||
461 | .macro save_timer_state | ||
462 | // x0: vcpu pointer | ||
463 | ldr x2, [x0, #VCPU_KVM] | ||
464 | kern_hyp_va x2 | ||
465 | ldr w3, [x2, #KVM_TIMER_ENABLED] | ||
466 | cbz w3, 1f | ||
467 | |||
468 | mrs x3, cntv_ctl_el0 | ||
469 | and x3, x3, #3 | ||
470 | str w3, [x0, #VCPU_TIMER_CNTV_CTL] | ||
471 | bic x3, x3, #1 // Clear Enable | ||
472 | msr cntv_ctl_el0, x3 | ||
473 | |||
474 | isb | ||
475 | |||
476 | mrs x3, cntv_cval_el0 | ||
477 | str x3, [x0, #VCPU_TIMER_CNTV_CVAL] | ||
478 | |||
479 | 1: | ||
480 | // Allow physical timer/counter access for the host | ||
481 | mrs x2, cnthctl_el2 | ||
482 | orr x2, x2, #3 | ||
483 | msr cnthctl_el2, x2 | ||
484 | |||
485 | // Clear cntvoff for the host | ||
486 | msr cntvoff_el2, xzr | ||
487 | .endm | ||
488 | |||
489 | .macro restore_timer_state | ||
490 | // x0: vcpu pointer | ||
491 | // Disallow physical timer access for the guest | ||
492 | // Physical counter access is allowed | ||
493 | mrs x2, cnthctl_el2 | ||
494 | orr x2, x2, #1 | ||
495 | bic x2, x2, #2 | ||
496 | msr cnthctl_el2, x2 | ||
497 | |||
498 | ldr x2, [x0, #VCPU_KVM] | ||
499 | kern_hyp_va x2 | ||
500 | ldr w3, [x2, #KVM_TIMER_ENABLED] | ||
501 | cbz w3, 1f | ||
502 | |||
503 | ldr x3, [x2, #KVM_TIMER_CNTVOFF] | ||
504 | msr cntvoff_el2, x3 | ||
505 | ldr x2, [x0, #VCPU_TIMER_CNTV_CVAL] | ||
506 | msr cntv_cval_el0, x2 | ||
507 | isb | ||
508 | |||
509 | ldr w2, [x0, #VCPU_TIMER_CNTV_CTL] | ||
510 | and x2, x2, #3 | ||
511 | msr cntv_ctl_el0, x2 | ||
512 | 1: | ||
513 | .endm | ||
514 | |||
515 | __save_sysregs: | ||
516 | save_sysregs | ||
517 | ret | ||
518 | |||
519 | __restore_sysregs: | ||
520 | restore_sysregs | ||
521 | ret | ||
522 | |||
523 | __save_fpsimd: | ||
524 | save_fpsimd | ||
525 | ret | ||
526 | |||
527 | __restore_fpsimd: | ||
528 | restore_fpsimd | ||
529 | ret | ||
530 | |||
531 | /* | ||
532 | * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu); | ||
533 | * | ||
534 | * This is the world switch. The first half of the function | ||
535 | * deals with entering the guest, and anything from __kvm_vcpu_return | ||
536 | * to the end of the function deals with reentering the host. | ||
537 | * On the enter path, only x0 (vcpu pointer) must be preserved until | ||
538 | * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception | ||
539 | * code) must both be preserved until the epilogue. | ||
540 | * In both cases, x2 points to the CPU context we're saving/restoring from/to. | ||
541 | */ | ||
542 | ENTRY(__kvm_vcpu_run) | ||
543 | kern_hyp_va x0 | ||
544 | msr tpidr_el2, x0 // Save the vcpu register | ||
545 | |||
546 | // Host context | ||
547 | ldr x2, [x0, #VCPU_HOST_CONTEXT] | ||
548 | kern_hyp_va x2 | ||
549 | |||
550 | save_host_regs | ||
551 | bl __save_fpsimd | ||
552 | bl __save_sysregs | ||
553 | |||
554 | activate_traps | ||
555 | activate_vm | ||
556 | |||
557 | restore_vgic_state | ||
558 | restore_timer_state | ||
559 | |||
560 | // Guest context | ||
561 | add x2, x0, #VCPU_CONTEXT | ||
562 | |||
563 | bl __restore_sysregs | ||
564 | bl __restore_fpsimd | ||
565 | restore_guest_32bit_state | ||
566 | restore_guest_regs | ||
567 | |||
568 | // That's it, no more messing around. | ||
569 | eret | ||
570 | |||
571 | __kvm_vcpu_return: | ||
572 | // Assume x0 is the vcpu pointer, x1 the return code | ||
573 | // Guest's x0-x3 are on the stack | ||
574 | |||
575 | // Guest context | ||
576 | add x2, x0, #VCPU_CONTEXT | ||
577 | |||
578 | save_guest_regs | ||
579 | bl __save_fpsimd | ||
580 | bl __save_sysregs | ||
581 | save_guest_32bit_state | ||
582 | |||
583 | save_timer_state | ||
584 | save_vgic_state | ||
585 | |||
586 | deactivate_traps | ||
587 | deactivate_vm | ||
588 | |||
589 | // Host context | ||
590 | ldr x2, [x0, #VCPU_HOST_CONTEXT] | ||
591 | kern_hyp_va x2 | ||
592 | |||
593 | bl __restore_sysregs | ||
594 | bl __restore_fpsimd | ||
595 | restore_host_regs | ||
596 | |||
597 | mov x0, x1 | ||
598 | ret | ||
599 | END(__kvm_vcpu_run) | ||
600 | |||
601 | // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); | ||
602 | ENTRY(__kvm_tlb_flush_vmid_ipa) | ||
603 | kern_hyp_va x0 | ||
604 | ldr x2, [x0, #KVM_VTTBR] | ||
605 | msr vttbr_el2, x2 | ||
606 | isb | ||
607 | |||
608 | /* | ||
609 | * We could do so much better if we had the VA as well. | ||
610 | * Instead, we invalidate Stage-2 for this IPA, and the | ||
611 | * whole of Stage-1. Weep... | ||
612 | */ | ||
613 | tlbi ipas2e1is, x1 | ||
614 | dsb sy | ||
615 | tlbi vmalle1is | ||
616 | dsb sy | ||
617 | isb | ||
618 | |||
619 | msr vttbr_el2, xzr | ||
620 | ret | ||
621 | ENDPROC(__kvm_tlb_flush_vmid_ipa) | ||
622 | |||
623 | ENTRY(__kvm_flush_vm_context) | ||
624 | tlbi alle1is | ||
625 | ic ialluis | ||
626 | dsb sy | ||
627 | ret | ||
628 | ENDPROC(__kvm_flush_vm_context) | ||
629 | |||
630 | __kvm_hyp_panic: | ||
631 | // Guess the context by looking at VTTBR: | ||
632 | // If zero, then we're already a host. | ||
633 | // Otherwise restore a minimal host context before panicking. | ||
634 | mrs x0, vttbr_el2 | ||
635 | cbz x0, 1f | ||
636 | |||
637 | mrs x0, tpidr_el2 | ||
638 | |||
639 | deactivate_traps | ||
640 | deactivate_vm | ||
641 | |||
642 | ldr x2, [x0, #VCPU_HOST_CONTEXT] | ||
643 | kern_hyp_va x2 | ||
644 | |||
645 | bl __restore_sysregs | ||
646 | |||
647 | 1: adr x0, __hyp_panic_str | ||
648 | adr x1, 2f | ||
649 | ldp x2, x3, [x1] | ||
650 | sub x0, x0, x2 | ||
651 | add x0, x0, x3 | ||
652 | mrs x1, spsr_el2 | ||
653 | mrs x2, elr_el2 | ||
654 | mrs x3, esr_el2 | ||
655 | mrs x4, far_el2 | ||
656 | mrs x5, hpfar_el2 | ||
657 | mrs x6, par_el1 | ||
658 | mrs x7, tpidr_el2 | ||
659 | |||
660 | mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ | ||
661 | PSR_MODE_EL1h) | ||
662 | msr spsr_el2, lr | ||
663 | ldr lr, =panic | ||
664 | msr elr_el2, lr | ||
665 | eret | ||
666 | |||
667 | .align 3 | ||
668 | 2: .quad HYP_PAGE_OFFSET | ||
669 | .quad PAGE_OFFSET | ||
670 | ENDPROC(__kvm_hyp_panic) | ||
671 | |||
672 | __hyp_panic_str: | ||
673 | .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0" | ||
674 | |||
675 | .align 2 | ||
676 | |||
677 | ENTRY(kvm_call_hyp) | ||
678 | hvc #0 | ||
679 | ret | ||
680 | ENDPROC(kvm_call_hyp) | ||
681 | |||
682 | .macro invalid_vector label, target | ||
683 | .align 2 | ||
684 | \label: | ||
685 | b \target | ||
686 | ENDPROC(\label) | ||
687 | .endm | ||
688 | |||
689 | /* None of these should ever happen */ | ||
690 | invalid_vector el2t_sync_invalid, __kvm_hyp_panic | ||
691 | invalid_vector el2t_irq_invalid, __kvm_hyp_panic | ||
692 | invalid_vector el2t_fiq_invalid, __kvm_hyp_panic | ||
693 | invalid_vector el2t_error_invalid, __kvm_hyp_panic | ||
694 | invalid_vector el2h_sync_invalid, __kvm_hyp_panic | ||
695 | invalid_vector el2h_irq_invalid, __kvm_hyp_panic | ||
696 | invalid_vector el2h_fiq_invalid, __kvm_hyp_panic | ||
697 | invalid_vector el2h_error_invalid, __kvm_hyp_panic | ||
698 | invalid_vector el1_sync_invalid, __kvm_hyp_panic | ||
699 | invalid_vector el1_irq_invalid, __kvm_hyp_panic | ||
700 | invalid_vector el1_fiq_invalid, __kvm_hyp_panic | ||
701 | invalid_vector el1_error_invalid, __kvm_hyp_panic | ||
702 | |||
703 | el1_sync: // Guest trapped into EL2 | ||
704 | push x0, x1 | ||
705 | push x2, x3 | ||
706 | |||
707 | mrs x1, esr_el2 | ||
708 | lsr x2, x1, #ESR_EL2_EC_SHIFT | ||
709 | |||
710 | cmp x2, #ESR_EL2_EC_HVC64 | ||
711 | b.ne el1_trap | ||
712 | |||
713 | mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest | ||
714 | cbnz x3, el1_trap // called HVC | ||
715 | |||
716 | /* Here, we're pretty sure the host called HVC. */ | ||
717 | pop x2, x3 | ||
718 | pop x0, x1 | ||
719 | |||
720 | push lr, xzr | ||
721 | |||
722 | /* | ||
723 | * Compute the function address in EL2, and shuffle the parameters. | ||
724 | */ | ||
725 | kern_hyp_va x0 | ||
726 | mov lr, x0 | ||
727 | mov x0, x1 | ||
728 | mov x1, x2 | ||
729 | mov x2, x3 | ||
730 | blr lr | ||
731 | |||
732 | pop lr, xzr | ||
733 | eret | ||
734 | |||
735 | el1_trap: | ||
736 | /* | ||
737 | * x1: ESR | ||
738 | * x2: ESR_EC | ||
739 | */ | ||
740 | cmp x2, #ESR_EL2_EC_DABT | ||
741 | mov x0, #ESR_EL2_EC_IABT | ||
742 | ccmp x2, x0, #4, ne | ||
743 | b.ne 1f // Not an abort we care about | ||
744 | |||
745 | /* This is an abort. Check for permission fault */ | ||
746 | and x2, x1, #ESR_EL2_FSC_TYPE | ||
747 | cmp x2, #FSC_PERM | ||
748 | b.ne 1f // Not a permission fault | ||
749 | |||
750 | /* | ||
751 | * Check for Stage-1 page table walk, which is guaranteed | ||
752 | * to give a valid HPFAR_EL2. | ||
753 | */ | ||
754 | tbnz x1, #7, 1f // S1PTW is set | ||
755 | |||
756 | /* | ||
757 | * Permission fault, HPFAR_EL2 is invalid. | ||
758 | * Resolve the IPA the hard way using the guest VA. | ||
759 | * Stage-1 translation already validated the memory access rights. | ||
760 | * As such, we can use the EL1 translation regime, and don't have | ||
761 | * to distinguish between EL0 and EL1 access. | ||
762 | */ | ||
763 | mrs x2, far_el2 | ||
764 | at s1e1r, x2 | ||
765 | isb | ||
766 | |||
767 | /* Read result */ | ||
768 | mrs x3, par_el1 | ||
769 | tbnz x3, #0, 3f // Bail out if we failed the translation | ||
770 | ubfx x3, x3, #12, #36 // Extract IPA | ||
771 | lsl x3, x3, #4 // and present it like HPFAR | ||
772 | b 2f | ||
773 | |||
774 | 1: mrs x3, hpfar_el2 | ||
775 | mrs x2, far_el2 | ||
776 | |||
777 | 2: mrs x0, tpidr_el2 | ||
778 | str x1, [x0, #VCPU_ESR_EL2] | ||
779 | str x2, [x0, #VCPU_FAR_EL2] | ||
780 | str x3, [x0, #VCPU_HPFAR_EL2] | ||
781 | |||
782 | mov x1, #ARM_EXCEPTION_TRAP | ||
783 | b __kvm_vcpu_return | ||
784 | |||
785 | /* | ||
786 | * Translation failed. Just return to the guest and | ||
787 | * let it fault again. Another CPU is probably playing | ||
788 | * behind our back. | ||
789 | */ | ||
790 | 3: pop x2, x3 | ||
791 | pop x0, x1 | ||
792 | |||
793 | eret | ||
794 | |||
795 | el1_irq: | ||
796 | push x0, x1 | ||
797 | push x2, x3 | ||
798 | mrs x0, tpidr_el2 | ||
799 | mov x1, #ARM_EXCEPTION_IRQ | ||
800 | b __kvm_vcpu_return | ||
801 | |||
802 | .ltorg | ||
803 | |||
804 | .align 11 | ||
805 | |||
806 | ENTRY(__kvm_hyp_vector) | ||
807 | ventry el2t_sync_invalid // Synchronous EL2t | ||
808 | ventry el2t_irq_invalid // IRQ EL2t | ||
809 | ventry el2t_fiq_invalid // FIQ EL2t | ||
810 | ventry el2t_error_invalid // Error EL2t | ||
811 | |||
812 | ventry el2h_sync_invalid // Synchronous EL2h | ||
813 | ventry el2h_irq_invalid // IRQ EL2h | ||
814 | ventry el2h_fiq_invalid // FIQ EL2h | ||
815 | ventry el2h_error_invalid // Error EL2h | ||
816 | |||
817 | ventry el1_sync // Synchronous 64-bit EL1 | ||
818 | ventry el1_irq // IRQ 64-bit EL1 | ||
819 | ventry el1_fiq_invalid // FIQ 64-bit EL1 | ||
820 | ventry el1_error_invalid // Error 64-bit EL1 | ||
821 | |||
822 | ventry el1_sync // Synchronous 32-bit EL1 | ||
823 | ventry el1_irq // IRQ 32-bit EL1 | ||
824 | ventry el1_fiq_invalid // FIQ 32-bit EL1 | ||
825 | ventry el1_error_invalid // Error 32-bit EL1 | ||
826 | ENDPROC(__kvm_hyp_vector) | ||
827 | |||
828 | __kvm_hyp_code_end: | ||
829 | .globl __kvm_hyp_code_end | ||
830 | |||
831 | .popsection | ||
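The AT S1E1R fixup in el1_trap condenses to one bit-field move: a successful PAR_EL1 holds PA[47:12] at bit 12, while HPFAR_EL2 carries IPA[47:12] at bit 4. A C equivalent of the ubfx/lsl pair (illustrative only; par_to_hpfar() is a hypothetical helper):

static unsigned long par_to_hpfar(unsigned long par)
{
	/* PAR_EL1 bit 0 set means the translation failed; the
	 * assembly bails out and lets the guest fault again. */
	if (par & 1)
		return 0;

	/* ubfx #12, #36: extract PA[47:12]; lsl #4: HPFAR layout */
	return ((par >> 12) & ((1UL << 36) - 1)) << 4;
}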
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c new file mode 100644 index 000000000000..81a02a8762b0 --- /dev/null +++ b/arch/arm64/kvm/inject_fault.c | |||
@@ -0,0 +1,203 @@ | |||
1 | /* | ||
2 | * Fault injection for both 32 and 64bit guests. | ||
3 | * | ||
4 | * Copyright (C) 2012,2013 - ARM Ltd | ||
5 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
6 | * | ||
7 | * Based on arch/arm/kvm/emulate.c | ||
8 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
9 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
10 | * | ||
11 | * This program is free software: you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
22 | */ | ||
23 | |||
24 | #include <linux/kvm_host.h> | ||
25 | #include <asm/kvm_emulate.h> | ||
26 | #include <asm/esr.h> | ||
27 | |||
28 | #define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \ | ||
29 | PSR_I_BIT | PSR_D_BIT) | ||
30 | #define EL1_EXCEPT_SYNC_OFFSET 0x200 | ||
31 | |||
32 | static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) | ||
33 | { | ||
34 | unsigned long cpsr; | ||
35 | unsigned long new_spsr_value = *vcpu_cpsr(vcpu); | ||
36 | bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT); | ||
37 | u32 return_offset = (is_thumb) ? 4 : 0; | ||
38 | u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); | ||
39 | |||
40 | cpsr = mode | COMPAT_PSR_I_BIT; | ||
41 | |||
42 | if (sctlr & (1 << 30)) | ||
43 | cpsr |= COMPAT_PSR_T_BIT; | ||
44 | if (sctlr & (1 << 25)) | ||
45 | cpsr |= COMPAT_PSR_E_BIT; | ||
46 | |||
47 | *vcpu_cpsr(vcpu) = cpsr; | ||
48 | |||
49 | /* Note: These now point to the banked copies */ | ||
50 | *vcpu_spsr(vcpu) = new_spsr_value; | ||
51 | *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; | ||
52 | |||
53 | /* Branch to exception vector */ | ||
54 | if (sctlr & (1 << 13)) | ||
55 | vect_offset += 0xffff0000; | ||
56 | else /* always have security exceptions */ | ||
57 | vect_offset += vcpu_cp15(vcpu, c12_VBAR); | ||
58 | |||
59 | *vcpu_pc(vcpu) = vect_offset; | ||
60 | } | ||
61 | |||
62 | static void inject_undef32(struct kvm_vcpu *vcpu) | ||
63 | { | ||
64 | prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4); | ||
65 | } | ||
66 | |||
67 | /* | ||
68 | * Modelled after TakeDataAbortException() and TakePrefetchAbortException() | ||
69 | * pseudocode. | ||
70 | */ | ||
71 | static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, | ||
72 | unsigned long addr) | ||
73 | { | ||
74 | u32 vect_offset; | ||
75 | u32 *far, *fsr; | ||
76 | bool is_lpae; | ||
77 | |||
78 | if (is_pabt) { | ||
79 | vect_offset = 12; | ||
80 | far = &vcpu_cp15(vcpu, c6_IFAR); | ||
81 | fsr = &vcpu_cp15(vcpu, c5_IFSR); | ||
82 | } else { /* !is_pabt, i.e. a data abort */ | ||
83 | vect_offset = 16; | ||
84 | far = &vcpu_cp15(vcpu, c6_DFAR); | ||
85 | fsr = &vcpu_cp15(vcpu, c5_DFSR); | ||
86 | } | ||
87 | |||
88 | prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset); | ||
89 | |||
90 | *far = addr; | ||
91 | |||
92 | /* Give the guest an IMPLEMENTATION DEFINED exception */ | ||
93 | is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); | ||
94 | if (is_lpae) | ||
95 | *fsr = 1 << 9 | 0x34; | ||
96 | else | ||
97 | *fsr = 0x14; | ||
98 | } | ||
99 | |||
100 | static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr) | ||
101 | { | ||
102 | unsigned long cpsr = *vcpu_cpsr(vcpu); | ||
103 | bool is_aarch32; | ||
104 | u32 esr = 0; | ||
105 | |||
106 | is_aarch32 = vcpu_mode_is_32bit(vcpu); | ||
107 | |||
108 | *vcpu_spsr(vcpu) = cpsr; | ||
109 | *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu); | ||
110 | |||
111 | *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64; | ||
112 | *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET; | ||
113 | |||
114 | vcpu_sys_reg(vcpu, FAR_EL1) = addr; | ||
115 | |||
116 | /* | ||
117 | * Build an {i,d}abort, depending on the level and the | ||
118 | * instruction set. Report an external synchronous abort. | ||
119 | */ | ||
120 | if (kvm_vcpu_trap_il_is32bit(vcpu)) | ||
121 | esr |= ESR_EL1_IL; | ||
122 | |||
123 | /* | ||
124 | * Here, the guest runs in AArch64 mode when in EL1. If we get | ||
125 | * an AArch32 fault, it means we managed to trap an EL0 fault. | ||
126 | */ | ||
127 | if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t) | ||
128 | esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT); | ||
129 | else | ||
130 | esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT); | ||
131 | |||
132 | if (!is_iabt) | ||
133 | esr |= ESR_EL1_EC_DABT_EL0; | ||
134 | |||
135 | vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT; | ||
136 | } | ||
137 | |||
138 | static void inject_undef64(struct kvm_vcpu *vcpu) | ||
139 | { | ||
140 | unsigned long cpsr = *vcpu_cpsr(vcpu); | ||
141 | u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT); | ||
142 | |||
143 | *vcpu_spsr(vcpu) = cpsr; | ||
144 | *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu); | ||
145 | |||
146 | *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64; | ||
147 | *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET; | ||
148 | |||
149 | /* | ||
150 | * Build an unknown exception, depending on the instruction | ||
151 | * set. | ||
152 | */ | ||
153 | if (kvm_vcpu_trap_il_is32bit(vcpu)) | ||
154 | esr |= ESR_EL1_IL; | ||
155 | |||
156 | vcpu_sys_reg(vcpu, ESR_EL1) = esr; | ||
157 | } | ||
158 | |||
159 | /** | ||
160 | * kvm_inject_dabt - inject a data abort into the guest | ||
161 | * @vcpu: The VCPU to receive the data abort | ||
162 | * @addr: The address to report in the DFAR | ||
163 | * | ||
164 | * It is assumed that this code is called from the VCPU thread and that the | ||
165 | * VCPU therefore is not currently executing guest code. | ||
166 | */ | ||
167 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) | ||
168 | { | ||
169 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) | ||
170 | inject_abt32(vcpu, false, addr); | ||
171 | |||
172 | inject_abt64(vcpu, false, addr); | ||
173 | } | ||
174 | |||
175 | /** | ||
176 | * kvm_inject_pabt - inject a prefetch abort into the guest | ||
177 | * @vcpu: The VCPU to receive the prefetch abort | ||
178 | * @addr: The address to report in the IFAR | ||
179 | * | ||
180 | * It is assumed that this code is called from the VCPU thread and that the | ||
181 | * VCPU therefore is not currently executing guest code. | ||
182 | */ | ||
183 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) | ||
184 | { | ||
185 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) | ||
186 | inject_abt32(vcpu, true, addr); | ||
187 | |||
188 | inject_abt64(vcpu, true, addr); | ||
189 | } | ||
190 | |||
191 | /** | ||
192 | * kvm_inject_undefined - inject an undefined instruction into the guest | ||
193 | * | ||
194 | * It is assumed that this code is called from the VCPU thread and that the | ||
195 | * VCPU therefore is not currently executing guest code. | ||
196 | */ | ||
197 | void kvm_inject_undefined(struct kvm_vcpu *vcpu) | ||
198 | { | ||
199 | if (!(vcpu->arch.hcr_el2 & HCR_RW)) | ||
200 | inject_undef32(vcpu); | ||
201 | |||
202 | inject_undef64(vcpu); | ||
203 | } | ||
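Both 64-bit injectors vector the guest to VBAR_EL1 + 0x200, the "current EL with SPx, synchronous" slot, while prepare_fault32() must first pick a 32-bit vector base. A worked form of that selection (illustrative only; vect_pc() is a hypothetical helper using kernel-style u32):

/* SCTLR.V (bit 13) selects the high-vector page at 0xffff0000;
 * otherwise the offset is applied to VBAR, as with the Security
 * Extensions. Mirrors the branch at the end of prepare_fault32(). */
static u32 vect_pc(u32 sctlr, u32 vbar, u32 vect_offset)
{
	if (sctlr & (1 << 13))		/* SCTLR.V: hivecs */
		return 0xffff0000 + vect_offset;
	return vbar + vect_offset;
}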
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c new file mode 100644 index 000000000000..bbc6ae32e4af --- /dev/null +++ b/arch/arm64/kvm/regmap.c | |||
@@ -0,0 +1,168 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/kvm/emulate.c: | ||
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | |||
22 | #include <linux/mm.h> | ||
23 | #include <linux/kvm_host.h> | ||
24 | #include <asm/kvm_emulate.h> | ||
25 | #include <asm/ptrace.h> | ||
26 | |||
27 | #define VCPU_NR_MODES 6 | ||
28 | #define REG_OFFSET(_reg) \ | ||
29 | (offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long)) | ||
30 | |||
31 | #define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R)) | ||
32 | |||
33 | static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = { | ||
34 | /* USR Registers */ | ||
35 | { | ||
36 | USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), | ||
37 | USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), | ||
38 | USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), | ||
39 | USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), | ||
40 | USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14), | ||
41 | REG_OFFSET(pc) | ||
42 | }, | ||
43 | |||
44 | /* FIQ Registers */ | ||
45 | { | ||
46 | USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), | ||
47 | USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), | ||
48 | USR_REG_OFFSET(6), USR_REG_OFFSET(7), | ||
49 | REG_OFFSET(compat_r8_fiq), /* r8 */ | ||
50 | REG_OFFSET(compat_r9_fiq), /* r9 */ | ||
51 | REG_OFFSET(compat_r10_fiq), /* r10 */ | ||
52 | REG_OFFSET(compat_r11_fiq), /* r11 */ | ||
53 | REG_OFFSET(compat_r12_fiq), /* r12 */ | ||
54 | REG_OFFSET(compat_sp_fiq), /* r13 */ | ||
55 | REG_OFFSET(compat_lr_fiq), /* r14 */ | ||
56 | REG_OFFSET(pc) | ||
57 | }, | ||
58 | |||
59 | /* IRQ Registers */ | ||
60 | { | ||
61 | USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), | ||
62 | USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), | ||
63 | USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), | ||
64 | USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), | ||
65 | USR_REG_OFFSET(12), | ||
66 | REG_OFFSET(compat_sp_irq), /* r13 */ | ||
67 | REG_OFFSET(compat_lr_irq), /* r14 */ | ||
68 | REG_OFFSET(pc) | ||
69 | }, | ||
70 | |||
71 | /* SVC Registers */ | ||
72 | { | ||
73 | USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), | ||
74 | USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), | ||
75 | USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), | ||
76 | USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), | ||
77 | USR_REG_OFFSET(12), | ||
78 | REG_OFFSET(compat_sp_svc), /* r13 */ | ||
79 | REG_OFFSET(compat_lr_svc), /* r14 */ | ||
80 | REG_OFFSET(pc) | ||
81 | }, | ||
82 | |||
83 | /* ABT Registers */ | ||
84 | { | ||
85 | USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), | ||
86 | USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), | ||
87 | USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), | ||
88 | USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), | ||
89 | USR_REG_OFFSET(12), | ||
90 | REG_OFFSET(compat_sp_abt), /* r13 */ | ||
91 | REG_OFFSET(compat_lr_abt), /* r14 */ | ||
92 | REG_OFFSET(pc) | ||
93 | }, | ||
94 | |||
95 | /* UND Registers */ | ||
96 | { | ||
97 | USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), | ||
98 | USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), | ||
99 | USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), | ||
100 | USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), | ||
101 | USR_REG_OFFSET(12), | ||
102 | REG_OFFSET(compat_sp_und), /* r13 */ | ||
103 | REG_OFFSET(compat_lr_und), /* r14 */ | ||
104 | REG_OFFSET(pc) | ||
105 | }, | ||
106 | }; | ||
107 | |||
108 | /* | ||
109 | * Return a pointer to the given register, banked according to the | ||
110 | * current mode of the virtual CPU. | ||
111 | */ | ||
112 | unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num) | ||
113 | { | ||
114 | unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs; | ||
115 | unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; | ||
116 | |||
117 | switch (mode) { | ||
118 | case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC: | ||
119 | mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */ | ||
120 | break; | ||
121 | |||
122 | case COMPAT_PSR_MODE_ABT: | ||
123 | mode = 4; | ||
124 | break; | ||
125 | |||
126 | case COMPAT_PSR_MODE_UND: | ||
127 | mode = 5; | ||
128 | break; | ||
129 | |||
130 | case COMPAT_PSR_MODE_SYS: | ||
131 | mode = 0; /* SYS maps to USR */ | ||
132 | break; | ||
133 | |||
134 | default: | ||
135 | BUG(); | ||
136 | } | ||
137 | |||
138 | return reg_array + vcpu_reg_offsets[mode][reg_num]; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * Return the SPSR for the current mode of the virtual CPU. | ||
143 | */ | ||
144 | unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu) | ||
145 | { | ||
146 | unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; | ||
147 | switch (mode) { | ||
148 | case COMPAT_PSR_MODE_SVC: | ||
149 | mode = KVM_SPSR_SVC; | ||
150 | break; | ||
151 | case COMPAT_PSR_MODE_ABT: | ||
152 | mode = KVM_SPSR_ABT; | ||
153 | break; | ||
154 | case COMPAT_PSR_MODE_UND: | ||
155 | mode = KVM_SPSR_UND; | ||
156 | break; | ||
157 | case COMPAT_PSR_MODE_IRQ: | ||
158 | mode = KVM_SPSR_IRQ; | ||
159 | break; | ||
160 | case COMPAT_PSR_MODE_FIQ: | ||
161 | mode = KVM_SPSR_FIQ; | ||
162 | break; | ||
163 | default: | ||
164 | BUG(); | ||
165 | } | ||
166 | |||
167 | return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[mode]; | ||
168 | } | ||
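
Both lookups above key off the low five mode bits of the guest CPSR. A
standalone sketch of the row selection used by vcpu_reg32() (the mode
encodings are assumptions taken from the ARM ARM, not defined in this
patch; USR..SVC collapse to rows 0..3 because clearing bit 4 of 0x10..0x13
yields the index directly):

    #include <stdint.h>

    #define MODE_USR 0x10
    #define MODE_FIQ 0x11
    #define MODE_IRQ 0x12
    #define MODE_SVC 0x13
    #define MODE_ABT 0x17
    #define MODE_UND 0x1b
    #define MODE_SYS 0x1f

    /* Map an AArch32 PSR mode to a row of the banked-register table. */
    static int mode_to_row(uint32_t cpsr)
    {
            switch (cpsr & 0x1f) {
            case MODE_USR: case MODE_FIQ: case MODE_IRQ: case MODE_SVC:
                    return (cpsr & 0x1f) & ~0x10;   /* 0..3 */
            case MODE_ABT: return 4;
            case MODE_UND: return 5;
            case MODE_SYS: return 0;                /* SYS shares USR's bank */
            default:       return -1;               /* unhandled mode */
            }
    }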
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c new file mode 100644 index 000000000000..70a7816535cd --- /dev/null +++ b/arch/arm64/kvm/reset.c | |||
@@ -0,0 +1,112 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/kvm/reset.c | ||
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License, version 2, as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | |||
22 | #include <linux/errno.h> | ||
23 | #include <linux/kvm_host.h> | ||
24 | #include <linux/kvm.h> | ||
25 | |||
26 | #include <kvm/arm_arch_timer.h> | ||
27 | |||
28 | #include <asm/cputype.h> | ||
29 | #include <asm/ptrace.h> | ||
30 | #include <asm/kvm_arm.h> | ||
31 | #include <asm/kvm_coproc.h> | ||
32 | |||
33 | /* | ||
34 | * ARMv8 Reset Values | ||
35 | */ | ||
36 | static const struct kvm_regs default_regs_reset = { | ||
37 | .regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | | ||
38 | PSR_F_BIT | PSR_D_BIT), | ||
39 | }; | ||
40 | |||
41 | static const struct kvm_regs default_regs_reset32 = { | ||
42 | .regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT | | ||
43 | COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT), | ||
44 | }; | ||
45 | |||
46 | static const struct kvm_irq_level default_vtimer_irq = { | ||
47 | .irq = 27, | ||
48 | .level = 1, | ||
49 | }; | ||
50 | |||
51 | static bool cpu_has_32bit_el1(void) | ||
52 | { | ||
53 | u64 pfr0; | ||
54 | |||
55 | pfr0 = read_cpuid(ID_AA64PFR0_EL1); | ||
56 | return !!(pfr0 & 0x20); | ||
57 | } | ||
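
The 0x20 test reads bit 1 of the four-bit EL1 field (bits [7:4]) of
ID_AA64PFR0_EL1, where field value 2 advertises AArch32 support at EL1. An
equivalent field-wise decode (the field layout is an assumption from the
ARM ARM):

    #include <stdbool.h>
    #include <stdint.h>

    /* EL1 field: 1 = AArch64 only, 2 = AArch64 and AArch32. */
    static bool el1_supports_aarch32(uint64_t pfr0)
    {
            return ((pfr0 >> 4) & 0xf) == 2;  /* matches pfr0 & 0x20 today */
    }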
58 | |||
59 | int kvm_arch_dev_ioctl_check_extension(long ext) | ||
60 | { | ||
61 | int r; | ||
62 | |||
63 | switch (ext) { | ||
64 | case KVM_CAP_ARM_EL1_32BIT: | ||
65 | r = cpu_has_32bit_el1(); | ||
66 | break; | ||
67 | default: | ||
68 | r = 0; | ||
69 | } | ||
70 | |||
71 | return r; | ||
72 | } | ||
73 | |||
74 | /** | ||
75 | * kvm_reset_vcpu - sets core registers and sys_regs to reset value | ||
76 | * @vcpu: The VCPU pointer | ||
77 | * | ||
78 | * This function finds the right table above and sets the registers on | ||
79 | * the virtual CPU struct to their architecturally defined reset | ||
80 | * values. | ||
81 | */ | ||
82 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | ||
83 | { | ||
84 | const struct kvm_irq_level *cpu_vtimer_irq; | ||
85 | const struct kvm_regs *cpu_reset; | ||
86 | |||
87 | switch (vcpu->arch.target) { | ||
88 | default: | ||
89 | if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { | ||
90 | if (!cpu_has_32bit_el1()) | ||
91 | return -EINVAL; | ||
92 | cpu_reset = &default_regs_reset32; | ||
93 | vcpu->arch.hcr_el2 &= ~HCR_RW; | ||
94 | } else { | ||
95 | cpu_reset = &default_regs_reset; | ||
96 | } | ||
97 | |||
98 | cpu_vtimer_irq = &default_vtimer_irq; | ||
99 | break; | ||
100 | } | ||
101 | |||
102 | /* Reset core registers */ | ||
103 | memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset)); | ||
104 | |||
105 | /* Reset system registers */ | ||
106 | kvm_reset_sys_regs(vcpu); | ||
107 | |||
108 | /* Reset timer */ | ||
109 | kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); | ||
110 | |||
111 | return 0; | ||
112 | } | ||
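
From userspace, the 32-bit branch of this reset path is reached through the
KVM_ARM_VCPU_INIT ioctl with the EL1_32BIT feature bit set; the -EINVAL
above is what a host without 32-bit EL1 hands back. A rough sketch, with
error handling elided and the arm64 uapi headers assumed:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Reset vcpu_fd as an AArch32 EL1 guest on a Foundation model target. */
    static int init_vcpu_aarch32(int vcpu_fd)
    {
            struct kvm_vcpu_init init = {
                    .target = KVM_ARM_TARGET_FOUNDATION_V8,
            };

            init.features[0] = 1 << KVM_ARM_VCPU_EL1_32BIT;
            return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
    }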
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c new file mode 100644 index 000000000000..94923609753b --- /dev/null +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -0,0 +1,1050 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/kvm/coproc.c: | ||
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
7 | * Authors: Rusty Russell <rusty@rustcorp.com.au> | ||
8 | * Christoffer Dall <c.dall@virtualopensystems.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License, version 2, as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
21 | */ | ||
22 | |||
23 | #include <linux/mm.h> | ||
24 | #include <linux/kvm_host.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | #include <asm/kvm_arm.h> | ||
27 | #include <asm/kvm_host.h> | ||
28 | #include <asm/kvm_emulate.h> | ||
29 | #include <asm/kvm_coproc.h> | ||
30 | #include <asm/cacheflush.h> | ||
31 | #include <asm/cputype.h> | ||
32 | #include <trace/events/kvm.h> | ||
33 | |||
34 | #include "sys_regs.h" | ||
35 | |||
36 | /* | ||
37 | * All of this file is extremely similar to the ARM coproc.c, but the | ||
38 | * types are different. My gut feeling is that it should be pretty | ||
39 | * easy to merge, but that would be an ABI breakage -- again. VFP | ||
40 | * would also need to be abstracted. | ||
41 | * | ||
42 | * For AArch32, we only take care of what is being trapped. Anything | ||
43 | * that has to do with init and userspace access has to go via the | ||
44 | * 64-bit interface. | ||
45 | */ | ||
46 | |||
47 | /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ | ||
48 | static u32 cache_levels; | ||
49 | |||
50 | /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ | ||
51 | #define CSSELR_MAX 12 | ||
52 | |||
53 | /* Which cache CCSIDR represents depends on CSSELR value. */ | ||
54 | static u32 get_ccsidr(u32 csselr) | ||
55 | { | ||
56 | u32 ccsidr; | ||
57 | |||
58 | /* Make sure no one else changes CSSELR during this! */ | ||
59 | local_irq_disable(); | ||
60 | /* Put value into CSSELR */ | ||
61 | asm volatile("msr csselr_el1, %x0" : : "r" (csselr)); | ||
62 | isb(); | ||
63 | /* Read result out of CCSIDR */ | ||
64 | asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr)); | ||
65 | local_irq_enable(); | ||
66 | |||
67 | return ccsidr; | ||
68 | } | ||
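
For reference, what the value read back from CCSIDR_EL1 encodes, decoded
field by field (the classic layout is assumed: LineSize in [2:0],
Associativity in [12:3], NumSets in [27:13], all minus-one encoded):

    #include <stdint.h>
    #include <stdio.h>

    static void print_ccsidr(uint32_t ccsidr)
    {
            unsigned int line  = 1u << ((ccsidr & 7) + 4);    /* bytes/line */
            unsigned int assoc = ((ccsidr >> 3) & 0x3ff) + 1;
            unsigned int sets  = ((ccsidr >> 13) & 0x7fff) + 1;

            printf("%u sets, %u-way, %u-byte lines (%u KiB)\n",
                   sets, assoc, line, sets * assoc * line / 1024);
    }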
69 | |||
70 | static void do_dc_cisw(u32 val) | ||
71 | { | ||
72 | asm volatile("dc cisw, %x0" : : "r" (val)); | ||
73 | dsb(); | ||
74 | } | ||
75 | |||
76 | static void do_dc_csw(u32 val) | ||
77 | { | ||
78 | asm volatile("dc csw, %x0" : : "r" (val)); | ||
79 | dsb(); | ||
80 | } | ||
81 | |||
82 | /* See note at ARM ARM B1.14.4 */ | ||
83 | static bool access_dcsw(struct kvm_vcpu *vcpu, | ||
84 | const struct sys_reg_params *p, | ||
85 | const struct sys_reg_desc *r) | ||
86 | { | ||
87 | unsigned long val; | ||
88 | int cpu; | ||
89 | |||
90 | if (!p->is_write) | ||
91 | return read_from_write_only(vcpu, p); | ||
92 | |||
93 | cpu = get_cpu(); | ||
94 | |||
95 | cpumask_setall(&vcpu->arch.require_dcache_flush); | ||
96 | cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); | ||
97 | |||
98 | /* If we were already preempted, take the long way around */ | ||
99 | if (cpu != vcpu->arch.last_pcpu) { | ||
100 | flush_cache_all(); | ||
101 | goto done; | ||
102 | } | ||
103 | |||
104 | val = *vcpu_reg(vcpu, p->Rt); | ||
105 | |||
106 | switch (p->CRm) { | ||
107 | case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ | ||
108 | case 14: /* DCCISW */ | ||
109 | do_dc_cisw(val); | ||
110 | break; | ||
111 | |||
112 | case 10: /* DCCSW */ | ||
113 | do_dc_csw(val); | ||
114 | break; | ||
115 | } | ||
116 | |||
117 | done: | ||
118 | put_cpu(); | ||
119 | |||
120 | return true; | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * We could trap ID_DFR0 and tell the guest we don't support performance | ||
125 | * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was | ||
126 | * NAKed, so it will read the PMCR anyway. | ||
127 | * | ||
128 | * Therefore we tell the guest we have 0 counters. Unfortunately, we | ||
129 | * must always support PMCCNTR (the cycle counter): we just RAZ/WI for | ||
130 | * all PM registers, which doesn't crash the guest kernel at least. | ||
131 | */ | ||
132 | static bool pm_fake(struct kvm_vcpu *vcpu, | ||
133 | const struct sys_reg_params *p, | ||
134 | const struct sys_reg_desc *r) | ||
135 | { | ||
136 | if (p->is_write) | ||
137 | return ignore_write(vcpu, p); | ||
138 | else | ||
139 | return read_zero(vcpu, p); | ||
140 | } | ||
141 | |||
142 | static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) | ||
143 | { | ||
144 | u64 amair; | ||
145 | |||
146 | asm volatile("mrs %0, amair_el1\n" : "=r" (amair)); | ||
147 | vcpu_sys_reg(vcpu, AMAIR_EL1) = amair; | ||
148 | } | ||
149 | |||
150 | static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) | ||
151 | { | ||
152 | /* | ||
153 | * Simply map the vcpu_id into the Aff0 field of the MPIDR. | ||
154 | */ | ||
155 | vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff); | ||
156 | } | ||
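
So vcpu 3 reads MPIDR_EL1 back as 0x80000003: bit 31 (fixed to 1 in this
MPIDR layout) plus its id in Aff0. As a one-line sketch:

    #include <stdint.h>

    static uint64_t guest_mpidr(uint32_t vcpu_id)
    {
            return (UINT64_C(1) << 31) | (vcpu_id & 0xff);  /* Aff0 = id */
    }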
157 | |||
158 | /* | ||
159 | * Architected system registers. | ||
160 | * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 | ||
161 | */ | ||
162 | static const struct sys_reg_desc sys_reg_descs[] = { | ||
163 | /* DC ISW */ | ||
164 | { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010), | ||
165 | access_dcsw }, | ||
166 | /* DC CSW */ | ||
167 | { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010), | ||
168 | access_dcsw }, | ||
169 | /* DC CISW */ | ||
170 | { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010), | ||
171 | access_dcsw }, | ||
172 | |||
173 | /* TEECR32_EL1 */ | ||
174 | { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), | ||
175 | NULL, reset_val, TEECR32_EL1, 0 }, | ||
176 | /* TEEHBR32_EL1 */ | ||
177 | { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000), | ||
178 | NULL, reset_val, TEEHBR32_EL1, 0 }, | ||
179 | /* DBGVCR32_EL2 */ | ||
180 | { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000), | ||
181 | NULL, reset_val, DBGVCR32_EL2, 0 }, | ||
182 | |||
183 | /* MPIDR_EL1 */ | ||
184 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101), | ||
185 | NULL, reset_mpidr, MPIDR_EL1 }, | ||
186 | /* SCTLR_EL1 */ | ||
187 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), | ||
188 | NULL, reset_val, SCTLR_EL1, 0x00C50078 }, | ||
189 | /* CPACR_EL1 */ | ||
190 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), | ||
191 | NULL, reset_val, CPACR_EL1, 0 }, | ||
192 | /* TTBR0_EL1 */ | ||
193 | { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000), | ||
194 | NULL, reset_unknown, TTBR0_EL1 }, | ||
195 | /* TTBR1_EL1 */ | ||
196 | { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001), | ||
197 | NULL, reset_unknown, TTBR1_EL1 }, | ||
198 | /* TCR_EL1 */ | ||
199 | { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010), | ||
200 | NULL, reset_val, TCR_EL1, 0 }, | ||
201 | |||
202 | /* AFSR0_EL1 */ | ||
203 | { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000), | ||
204 | NULL, reset_unknown, AFSR0_EL1 }, | ||
205 | /* AFSR1_EL1 */ | ||
206 | { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001), | ||
207 | NULL, reset_unknown, AFSR1_EL1 }, | ||
208 | /* ESR_EL1 */ | ||
209 | { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000), | ||
210 | NULL, reset_unknown, ESR_EL1 }, | ||
211 | /* FAR_EL1 */ | ||
212 | { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), | ||
213 | NULL, reset_unknown, FAR_EL1 }, | ||
214 | |||
215 | /* PMINTENSET_EL1 */ | ||
216 | { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), | ||
217 | pm_fake }, | ||
218 | /* PMINTENCLR_EL1 */ | ||
219 | { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010), | ||
220 | pm_fake }, | ||
221 | |||
222 | /* MAIR_EL1 */ | ||
223 | { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000), | ||
224 | NULL, reset_unknown, MAIR_EL1 }, | ||
225 | /* AMAIR_EL1 */ | ||
226 | { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000), | ||
227 | NULL, reset_amair_el1, AMAIR_EL1 }, | ||
228 | |||
229 | /* VBAR_EL1 */ | ||
230 | { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000), | ||
231 | NULL, reset_val, VBAR_EL1, 0 }, | ||
232 | /* CONTEXTIDR_EL1 */ | ||
233 | { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), | ||
234 | NULL, reset_val, CONTEXTIDR_EL1, 0 }, | ||
235 | /* TPIDR_EL1 */ | ||
236 | { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100), | ||
237 | NULL, reset_unknown, TPIDR_EL1 }, | ||
238 | |||
239 | /* CNTKCTL_EL1 */ | ||
240 | { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000), | ||
241 | NULL, reset_val, CNTKCTL_EL1, 0}, | ||
242 | |||
243 | /* CSSELR_EL1 */ | ||
244 | { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), | ||
245 | NULL, reset_unknown, CSSELR_EL1 }, | ||
246 | |||
247 | /* PMCR_EL0 */ | ||
248 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000), | ||
249 | pm_fake }, | ||
250 | /* PMCNTENSET_EL0 */ | ||
251 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001), | ||
252 | pm_fake }, | ||
253 | /* PMCNTENCLR_EL0 */ | ||
254 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010), | ||
255 | pm_fake }, | ||
256 | /* PMOVSCLR_EL0 */ | ||
257 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011), | ||
258 | pm_fake }, | ||
259 | /* PMSWINC_EL0 */ | ||
260 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100), | ||
261 | pm_fake }, | ||
262 | /* PMSELR_EL0 */ | ||
263 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101), | ||
264 | pm_fake }, | ||
265 | /* PMCEID0_EL0 */ | ||
266 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110), | ||
267 | pm_fake }, | ||
268 | /* PMCEID1_EL0 */ | ||
269 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111), | ||
270 | pm_fake }, | ||
271 | /* PMCCNTR_EL0 */ | ||
272 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000), | ||
273 | pm_fake }, | ||
274 | /* PMXEVTYPER_EL0 */ | ||
275 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001), | ||
276 | pm_fake }, | ||
277 | /* PMXEVCNTR_EL0 */ | ||
278 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), | ||
279 | pm_fake }, | ||
280 | /* PMUSERENR_EL0 */ | ||
281 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000), | ||
282 | pm_fake }, | ||
283 | /* PMOVSSET_EL0 */ | ||
284 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011), | ||
285 | pm_fake }, | ||
286 | |||
287 | /* TPIDR_EL0 */ | ||
288 | { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010), | ||
289 | NULL, reset_unknown, TPIDR_EL0 }, | ||
290 | /* TPIDRRO_EL0 */ | ||
291 | { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011), | ||
292 | NULL, reset_unknown, TPIDRRO_EL0 }, | ||
293 | |||
294 | /* DACR32_EL2 */ | ||
295 | { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000), | ||
296 | NULL, reset_unknown, DACR32_EL2 }, | ||
297 | /* IFSR32_EL2 */ | ||
298 | { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001), | ||
299 | NULL, reset_unknown, IFSR32_EL2 }, | ||
300 | /* FPEXC32_EL2 */ | ||
301 | { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000), | ||
302 | NULL, reset_val, FPEXC32_EL2, 0x70 }, | ||
303 | }; | ||
304 | |||
305 | /* Trapped cp15 registers */ | ||
306 | static const struct sys_reg_desc cp15_regs[] = { | ||
307 | /* | ||
308 | * DC{C,I,CI}SW operations: | ||
309 | */ | ||
310 | { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw }, | ||
311 | { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, | ||
312 | { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, | ||
313 | { Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake }, | ||
314 | { Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake }, | ||
315 | { Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake }, | ||
316 | { Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake }, | ||
317 | { Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake }, | ||
318 | { Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake }, | ||
319 | { Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake }, | ||
320 | { Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake }, | ||
321 | { Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake }, | ||
322 | { Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake }, | ||
323 | { Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake }, | ||
324 | { Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake }, | ||
325 | { Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake }, | ||
326 | }; | ||
327 | |||
328 | /* Target specific emulation tables */ | ||
329 | static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS]; | ||
330 | |||
331 | void kvm_register_target_sys_reg_table(unsigned int target, | ||
332 | struct kvm_sys_reg_target_table *table) | ||
333 | { | ||
334 | target_tables[target] = table; | ||
335 | } | ||
336 | |||
337 | /* Get specific register table for this target. */ | ||
338 | static const struct sys_reg_desc *get_target_table(unsigned target, | ||
339 | bool mode_is_64, | ||
340 | size_t *num) | ||
341 | { | ||
342 | struct kvm_sys_reg_target_table *table; | ||
343 | |||
344 | table = target_tables[target]; | ||
345 | if (mode_is_64) { | ||
346 | *num = table->table64.num; | ||
347 | return table->table64.table; | ||
348 | } else { | ||
349 | *num = table->table32.num; | ||
350 | return table->table32.table; | ||
351 | } | ||
352 | } | ||
353 | |||
354 | static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params, | ||
355 | const struct sys_reg_desc table[], | ||
356 | unsigned int num) | ||
357 | { | ||
358 | unsigned int i; | ||
359 | |||
360 | for (i = 0; i < num; i++) { | ||
361 | const struct sys_reg_desc *r = &table[i]; | ||
362 | |||
363 | if (params->Op0 != r->Op0) | ||
364 | continue; | ||
365 | if (params->Op1 != r->Op1) | ||
366 | continue; | ||
367 | if (params->CRn != r->CRn) | ||
368 | continue; | ||
369 | if (params->CRm != r->CRm) | ||
370 | continue; | ||
371 | if (params->Op2 != r->Op2) | ||
372 | continue; | ||
373 | |||
374 | return r; | ||
375 | } | ||
376 | return NULL; | ||
377 | } | ||
378 | |||
379 | int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
380 | { | ||
381 | kvm_inject_undefined(vcpu); | ||
382 | return 1; | ||
383 | } | ||
384 | |||
385 | int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
386 | { | ||
387 | kvm_inject_undefined(vcpu); | ||
388 | return 1; | ||
389 | } | ||
390 | |||
391 | static void emulate_cp15(struct kvm_vcpu *vcpu, | ||
392 | const struct sys_reg_params *params) | ||
393 | { | ||
394 | size_t num; | ||
395 | const struct sys_reg_desc *table, *r; | ||
396 | |||
397 | table = get_target_table(vcpu->arch.target, false, &num); | ||
398 | |||
399 | /* Search target-specific then generic table. */ | ||
400 | r = find_reg(params, table, num); | ||
401 | if (!r) | ||
402 | r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs)); | ||
403 | |||
404 | if (likely(r)) { | ||
405 | /* | ||
406 | * Not having an accessor means that we have | ||
407 | * configured a trap that we don't know how to | ||
408 | * handle. This certainly qualifies as a gross bug | ||
409 | * that should be fixed right away. | ||
410 | */ | ||
411 | BUG_ON(!r->access); | ||
412 | |||
413 | if (likely(r->access(vcpu, params, r))) { | ||
414 | /* Skip instruction, since it was emulated */ | ||
415 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | ||
416 | return; | ||
417 | } | ||
418 | /* If access function fails, it should complain. */ | ||
419 | } | ||
420 | |||
421 | kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu)); | ||
422 | print_sys_reg_instr(params); | ||
423 | kvm_inject_undefined(vcpu); | ||
424 | } | ||
425 | |||
426 | /** | ||
427 | * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access | ||
428 | * @vcpu: The VCPU pointer | ||
429 | * @run: The kvm_run struct | ||
430 | */ | ||
431 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
432 | { | ||
433 | struct sys_reg_params params; | ||
434 | u32 hsr = kvm_vcpu_get_hsr(vcpu); | ||
435 | int Rt2 = (hsr >> 10) & 0xf; | ||
436 | |||
437 | params.CRm = (hsr >> 1) & 0xf; | ||
438 | params.Rt = (hsr >> 5) & 0xf; | ||
439 | params.is_write = ((hsr & 1) == 0); | ||
440 | |||
441 | params.Op0 = 0; | ||
442 | params.Op1 = (hsr >> 16) & 0xf; | ||
443 | params.Op2 = 0; | ||
444 | params.CRn = 0; | ||
445 | |||
446 | /* | ||
447 | * Massive hack here. Store Rt2 in the top 32 bits so we only | ||
448 | * have one register to deal with. As we use the same trap | ||
449 | * backends between AArch32 and AArch64, we get away with it. | ||
450 | */ | ||
451 | if (params.is_write) { | ||
452 | u64 val = *vcpu_reg(vcpu, params.Rt); | ||
453 | val &= 0xffffffff; | ||
454 | val |= *vcpu_reg(vcpu, Rt2) << 32; | ||
455 | *vcpu_reg(vcpu, params.Rt) = val; | ||
456 | } | ||
457 | |||
458 | emulate_cp15(vcpu, ¶ms); | ||
459 | |||
460 | /* Do the opposite hack for the read side */ | ||
461 | if (!params.is_write) { | ||
462 | u64 val = *vcpu_reg(vcpu, params.Rt); | ||
463 | val >>= 32; | ||
464 | *vcpu_reg(vcpu, Rt2) = val; | ||
465 | } | ||
466 | |||
467 | return 1; | ||
468 | } | ||
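
The packing that the two hacks above perform, pulled out into standalone
helpers: Rt carries the low word and Rt2 the high word, so one 64-bit value
round-trips through the common backend.

    #include <stdint.h>

    static uint64_t pack_rt_rt2(uint32_t rt_val, uint32_t rt2_val)
    {
            return (uint64_t)rt2_val << 32 | rt_val;
    }

    static void unpack_rt_rt2(uint64_t val, uint32_t *rt_val, uint32_t *rt2_val)
    {
            *rt_val  = (uint32_t)val;
            *rt2_val = (uint32_t)(val >> 32);
    }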
469 | |||
470 | /** | ||
471 | * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access | ||
472 | * @vcpu: The VCPU pointer | ||
473 | * @run: The kvm_run struct | ||
474 | */ | ||
475 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
476 | { | ||
477 | struct sys_reg_params params; | ||
478 | u32 hsr = kvm_vcpu_get_hsr(vcpu); | ||
479 | |||
480 | params.CRm = (hsr >> 1) & 0xf; | ||
481 | params.Rt = (hsr >> 5) & 0xf; | ||
482 | params.is_write = ((hsr & 1) == 0); | ||
483 | params.CRn = (hsr >> 10) & 0xf; | ||
484 | params.Op0 = 0; | ||
485 | params.Op1 = (hsr >> 14) & 0x7; | ||
486 | params.Op2 = (hsr >> 17) & 0x7; | ||
487 | |||
488 | emulate_cp15(vcpu, ¶ms); | ||
489 | return 1; | ||
490 | } | ||
491 | |||
492 | static int emulate_sys_reg(struct kvm_vcpu *vcpu, | ||
493 | const struct sys_reg_params *params) | ||
494 | { | ||
495 | size_t num; | ||
496 | const struct sys_reg_desc *table, *r; | ||
497 | |||
498 | table = get_target_table(vcpu->arch.target, true, &num); | ||
499 | |||
500 | /* Search target-specific then generic table. */ | ||
501 | r = find_reg(params, table, num); | ||
502 | if (!r) | ||
503 | r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); | ||
504 | |||
505 | if (likely(r)) { | ||
506 | /* | ||
507 | * Not having an accessor means that we have | ||
508 | * configured a trap that we don't know how to | ||
509 | * handle. This certainly qualifies as a gross bug | ||
510 | * that should be fixed right away. | ||
511 | */ | ||
512 | BUG_ON(!r->access); | ||
513 | |||
514 | if (likely(r->access(vcpu, params, r))) { | ||
515 | /* Skip instruction, since it was emulated */ | ||
516 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | ||
517 | return 1; | ||
518 | } | ||
519 | /* If access function fails, it should complain. */ | ||
520 | } else { | ||
521 | kvm_err("Unsupported guest sys_reg access at: %lx\n", | ||
522 | *vcpu_pc(vcpu)); | ||
523 | print_sys_reg_instr(params); | ||
524 | } | ||
525 | kvm_inject_undefined(vcpu); | ||
526 | return 1; | ||
527 | } | ||
528 | |||
529 | static void reset_sys_reg_descs(struct kvm_vcpu *vcpu, | ||
530 | const struct sys_reg_desc *table, size_t num) | ||
531 | { | ||
532 | unsigned long i; | ||
533 | |||
534 | for (i = 0; i < num; i++) | ||
535 | if (table[i].reset) | ||
536 | table[i].reset(vcpu, &table[i]); | ||
537 | } | ||
538 | |||
539 | /** | ||
540 | * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access | ||
541 | * @vcpu: The VCPU pointer | ||
542 | * @run: The kvm_run struct | ||
543 | */ | ||
544 | int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
545 | { | ||
546 | struct sys_reg_params params; | ||
547 | unsigned long esr = kvm_vcpu_get_hsr(vcpu); | ||
548 | |||
549 | params.Op0 = (esr >> 20) & 3; | ||
550 | params.Op1 = (esr >> 14) & 0x7; | ||
551 | params.CRn = (esr >> 10) & 0xf; | ||
552 | params.CRm = (esr >> 1) & 0xf; | ||
553 | params.Op2 = (esr >> 17) & 0x7; | ||
554 | params.Rt = (esr >> 5) & 0x1f; | ||
555 | params.is_write = !(esr & 1); | ||
556 | |||
557 | return emulate_sys_reg(vcpu, ¶ms); | ||
558 | } | ||
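
The same ISS layout as a self-contained decoder, useful when eyeballing raw
syndrome values; the shifts mirror the handler above, while the EC=0x18
(trapped MSR/MRS) format itself is an assumption from the ARMv8 spec:

    #include <stdint.h>

    struct sysreg_insn {
            unsigned int op0, op1, crn, crm, op2, rt;
            int is_write;
    };

    static struct sysreg_insn decode_sysreg_trap(uint32_t esr)
    {
            struct sysreg_insn i = {
                    .op0      = (esr >> 20) & 0x3,
                    .op1      = (esr >> 14) & 0x7,
                    .crn      = (esr >> 10) & 0xf,
                    .crm      = (esr >> 1)  & 0xf,
                    .op2      = (esr >> 17) & 0x7,
                    .rt       = (esr >> 5)  & 0x1f,
                    .is_write = !(esr & 1),
            };
            return i;
    }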
559 | |||
560 | /****************************************************************************** | ||
561 | * Userspace API | ||
562 | *****************************************************************************/ | ||
563 | |||
564 | static bool index_to_params(u64 id, struct sys_reg_params *params) | ||
565 | { | ||
566 | switch (id & KVM_REG_SIZE_MASK) { | ||
567 | case KVM_REG_SIZE_U64: | ||
568 | /* Any unused index bits means it's not valid. */ | ||
569 | if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | ||
570 | | KVM_REG_ARM_COPROC_MASK | ||
571 | | KVM_REG_ARM64_SYSREG_OP0_MASK | ||
572 | | KVM_REG_ARM64_SYSREG_OP1_MASK | ||
573 | | KVM_REG_ARM64_SYSREG_CRN_MASK | ||
574 | | KVM_REG_ARM64_SYSREG_CRM_MASK | ||
575 | | KVM_REG_ARM64_SYSREG_OP2_MASK)) | ||
576 | return false; | ||
577 | params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK) | ||
578 | >> KVM_REG_ARM64_SYSREG_OP0_SHIFT); | ||
579 | params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK) | ||
580 | >> KVM_REG_ARM64_SYSREG_OP1_SHIFT); | ||
581 | params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK) | ||
582 | >> KVM_REG_ARM64_SYSREG_CRN_SHIFT); | ||
583 | params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK) | ||
584 | >> KVM_REG_ARM64_SYSREG_CRM_SHIFT); | ||
585 | params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK) | ||
586 | >> KVM_REG_ARM64_SYSREG_OP2_SHIFT); | ||
587 | return true; | ||
588 | default: | ||
589 | return false; | ||
590 | } | ||
591 | } | ||
592 | |||
593 | /* Decode an index value, and find the sys_reg_desc entry. */ | ||
594 | static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, | ||
595 | u64 id) | ||
596 | { | ||
597 | size_t num; | ||
598 | const struct sys_reg_desc *table, *r; | ||
599 | struct sys_reg_params params; | ||
600 | |||
601 | /* We only do sys_reg for now. */ | ||
602 | if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) | ||
603 | return NULL; | ||
604 | |||
605 | if (!index_to_params(id, ¶ms)) | ||
606 | return NULL; | ||
607 | |||
608 | table = get_target_table(vcpu->arch.target, true, &num); | ||
609 | r = find_reg(¶ms, table, num); | ||
610 | if (!r) | ||
611 | r = find_reg(¶ms, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); | ||
612 | |||
613 | /* Not saved in the sys_reg array? */ | ||
614 | if (r && !r->reg) | ||
615 | r = NULL; | ||
616 | |||
617 | return r; | ||
618 | } | ||
619 | |||
620 | /* | ||
621 | * These are the invariant sys_reg registers: we let the guest see the | ||
622 | * host versions of these, so they're part of the guest state. | ||
623 | * | ||
624 | * A future CPU may provide a mechanism to present different values to | ||
625 | * the guest, or a future kvm may trap them. | ||
626 | */ | ||
627 | |||
628 | #define FUNCTION_INVARIANT(reg) \ | ||
629 | static void get_##reg(struct kvm_vcpu *v, \ | ||
630 | const struct sys_reg_desc *r) \ | ||
631 | { \ | ||
632 | u64 val; \ | ||
633 | \ | ||
634 | asm volatile("mrs %0, " __stringify(reg) "\n" \ | ||
635 | : "=r" (val)); \ | ||
636 | ((struct sys_reg_desc *)r)->val = val; \ | ||
637 | } | ||
638 | |||
639 | FUNCTION_INVARIANT(midr_el1) | ||
640 | FUNCTION_INVARIANT(ctr_el0) | ||
641 | FUNCTION_INVARIANT(revidr_el1) | ||
642 | FUNCTION_INVARIANT(id_pfr0_el1) | ||
643 | FUNCTION_INVARIANT(id_pfr1_el1) | ||
644 | FUNCTION_INVARIANT(id_dfr0_el1) | ||
645 | FUNCTION_INVARIANT(id_afr0_el1) | ||
646 | FUNCTION_INVARIANT(id_mmfr0_el1) | ||
647 | FUNCTION_INVARIANT(id_mmfr1_el1) | ||
648 | FUNCTION_INVARIANT(id_mmfr2_el1) | ||
649 | FUNCTION_INVARIANT(id_mmfr3_el1) | ||
650 | FUNCTION_INVARIANT(id_isar0_el1) | ||
651 | FUNCTION_INVARIANT(id_isar1_el1) | ||
652 | FUNCTION_INVARIANT(id_isar2_el1) | ||
653 | FUNCTION_INVARIANT(id_isar3_el1) | ||
654 | FUNCTION_INVARIANT(id_isar4_el1) | ||
655 | FUNCTION_INVARIANT(id_isar5_el1) | ||
656 | FUNCTION_INVARIANT(clidr_el1) | ||
657 | FUNCTION_INVARIANT(aidr_el1) | ||
658 | |||
659 | /* ->val is filled in by kvm_sys_reg_table_init() */ | ||
660 | static struct sys_reg_desc invariant_sys_regs[] = { | ||
661 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000), | ||
662 | NULL, get_midr_el1 }, | ||
663 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110), | ||
664 | NULL, get_revidr_el1 }, | ||
665 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000), | ||
666 | NULL, get_id_pfr0_el1 }, | ||
667 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001), | ||
668 | NULL, get_id_pfr1_el1 }, | ||
669 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010), | ||
670 | NULL, get_id_dfr0_el1 }, | ||
671 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011), | ||
672 | NULL, get_id_afr0_el1 }, | ||
673 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100), | ||
674 | NULL, get_id_mmfr0_el1 }, | ||
675 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101), | ||
676 | NULL, get_id_mmfr1_el1 }, | ||
677 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110), | ||
678 | NULL, get_id_mmfr2_el1 }, | ||
679 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111), | ||
680 | NULL, get_id_mmfr3_el1 }, | ||
681 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000), | ||
682 | NULL, get_id_isar0_el1 }, | ||
683 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001), | ||
684 | NULL, get_id_isar1_el1 }, | ||
685 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010), | ||
686 | NULL, get_id_isar2_el1 }, | ||
687 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011), | ||
688 | NULL, get_id_isar3_el1 }, | ||
689 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100), | ||
690 | NULL, get_id_isar4_el1 }, | ||
691 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101), | ||
692 | NULL, get_id_isar5_el1 }, | ||
693 | { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001), | ||
694 | NULL, get_clidr_el1 }, | ||
695 | { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111), | ||
696 | NULL, get_aidr_el1 }, | ||
697 | { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001), | ||
698 | NULL, get_ctr_el0 }, | ||
699 | }; | ||
700 | |||
701 | static int reg_from_user(void *val, const void __user *uaddr, u64 id) | ||
702 | { | ||
703 | /* This Just Works because we are little endian. */ | ||
704 | if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) | ||
705 | return -EFAULT; | ||
706 | return 0; | ||
707 | } | ||
708 | |||
709 | static int reg_to_user(void __user *uaddr, const void *val, u64 id) | ||
710 | { | ||
711 | /* This Just Works because we are little endian. */ | ||
712 | if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) | ||
713 | return -EFAULT; | ||
714 | return 0; | ||
715 | } | ||
716 | |||
717 | static int get_invariant_sys_reg(u64 id, void __user *uaddr) | ||
718 | { | ||
719 | struct sys_reg_params params; | ||
720 | const struct sys_reg_desc *r; | ||
721 | |||
722 | if (!index_to_params(id, ¶ms)) | ||
723 | return -ENOENT; | ||
724 | |||
725 | r = find_reg(¶ms, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)); | ||
726 | if (!r) | ||
727 | return -ENOENT; | ||
728 | |||
729 | return reg_to_user(uaddr, &r->val, id); | ||
730 | } | ||
731 | |||
732 | static int set_invariant_sys_reg(u64 id, void __user *uaddr) | ||
733 | { | ||
734 | struct sys_reg_params params; | ||
735 | const struct sys_reg_desc *r; | ||
736 | int err; | ||
737 | u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */ | ||
738 | |||
739 | if (!index_to_params(id, ¶ms)) | ||
740 | return -ENOENT; | ||
741 | r = find_reg(¶ms, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)); | ||
742 | if (!r) | ||
743 | return -ENOENT; | ||
744 | |||
745 | err = reg_from_user(&val, uaddr, id); | ||
746 | if (err) | ||
747 | return err; | ||
748 | |||
749 | /* This is what we mean by invariant: you can't change it. */ | ||
750 | if (r->val != val) | ||
751 | return -EINVAL; | ||
752 | |||
753 | return 0; | ||
754 | } | ||
755 | |||
756 | static bool is_valid_cache(u32 val) | ||
757 | { | ||
758 | u32 level, ctype; | ||
759 | |||
760 | if (val >= CSSELR_MAX) | ||
761 | return false; | ||
762 | |||
763 | /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ | ||
764 | level = (val >> 1); | ||
765 | ctype = (cache_levels >> (level * 3)) & 7; | ||
766 | |||
767 | switch (ctype) { | ||
768 | case 0: /* No cache */ | ||
769 | return false; | ||
770 | case 1: /* Instruction cache only */ | ||
771 | return (val & 1); | ||
772 | case 2: /* Data cache only */ | ||
773 | case 4: /* Unified cache */ | ||
774 | return !(val & 1); | ||
775 | case 3: /* Separate instruction and data caches */ | ||
776 | return true; | ||
777 | default: /* Reserved: we can't know instruction or data. */ | ||
778 | return false; | ||
779 | } | ||
780 | } | ||
781 | |||
782 | static int demux_c15_get(u64 id, void __user *uaddr) | ||
783 | { | ||
784 | u32 val; | ||
785 | u32 __user *uval = uaddr; | ||
786 | |||
787 | /* Fail if we have unknown bits set. */ | ||
788 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ||
789 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) | ||
790 | return -ENOENT; | ||
791 | |||
792 | switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { | ||
793 | case KVM_REG_ARM_DEMUX_ID_CCSIDR: | ||
794 | if (KVM_REG_SIZE(id) != 4) | ||
795 | return -ENOENT; | ||
796 | val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) | ||
797 | >> KVM_REG_ARM_DEMUX_VAL_SHIFT; | ||
798 | if (!is_valid_cache(val)) | ||
799 | return -ENOENT; | ||
800 | |||
801 | return put_user(get_ccsidr(val), uval); | ||
802 | default: | ||
803 | return -ENOENT; | ||
804 | } | ||
805 | } | ||
806 | |||
807 | static int demux_c15_set(u64 id, void __user *uaddr) | ||
808 | { | ||
809 | u32 val, newval; | ||
810 | u32 __user *uval = uaddr; | ||
811 | |||
812 | /* Fail if we have unknown bits set. */ | ||
813 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ||
814 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) | ||
815 | return -ENOENT; | ||
816 | |||
817 | switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { | ||
818 | case KVM_REG_ARM_DEMUX_ID_CCSIDR: | ||
819 | if (KVM_REG_SIZE(id) != 4) | ||
820 | return -ENOENT; | ||
821 | val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) | ||
822 | >> KVM_REG_ARM_DEMUX_VAL_SHIFT; | ||
823 | if (!is_valid_cache(val)) | ||
824 | return -ENOENT; | ||
825 | |||
826 | if (get_user(newval, uval)) | ||
827 | return -EFAULT; | ||
828 | |||
829 | /* This is also invariant: you can't change it. */ | ||
830 | if (newval != get_ccsidr(val)) | ||
831 | return -EINVAL; | ||
832 | return 0; | ||
833 | default: | ||
834 | return -ENOENT; | ||
835 | } | ||
836 | } | ||
837 | |||
838 | int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
839 | { | ||
840 | const struct sys_reg_desc *r; | ||
841 | void __user *uaddr = (void __user *)(unsigned long)reg->addr; | ||
842 | |||
843 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) | ||
844 | return demux_c15_get(reg->id, uaddr); | ||
845 | |||
846 | if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) | ||
847 | return -ENOENT; | ||
848 | |||
849 | r = index_to_sys_reg_desc(vcpu, reg->id); | ||
850 | if (!r) | ||
851 | return get_invariant_sys_reg(reg->id, uaddr); | ||
852 | |||
853 | return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id); | ||
854 | } | ||
855 | |||
856 | int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
857 | { | ||
858 | const struct sys_reg_desc *r; | ||
859 | void __user *uaddr = (void __user *)(unsigned long)reg->addr; | ||
860 | |||
861 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) | ||
862 | return demux_c15_set(reg->id, uaddr); | ||
863 | |||
864 | if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) | ||
865 | return -ENOENT; | ||
866 | |||
867 | r = index_to_sys_reg_desc(vcpu, reg->id); | ||
868 | if (!r) | ||
869 | return set_invariant_sys_reg(reg->id, uaddr); | ||
870 | |||
871 | return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id); | ||
872 | } | ||
873 | |||
874 | static unsigned int num_demux_regs(void) | ||
875 | { | ||
876 | unsigned int i, count = 0; | ||
877 | |||
878 | for (i = 0; i < CSSELR_MAX; i++) | ||
879 | if (is_valid_cache(i)) | ||
880 | count++; | ||
881 | |||
882 | return count; | ||
883 | } | ||
884 | |||
885 | static int write_demux_regids(u64 __user *uindices) | ||
886 | { | ||
887 | u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; | ||
888 | unsigned int i; | ||
889 | |||
890 | val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; | ||
891 | for (i = 0; i < CSSELR_MAX; i++) { | ||
892 | if (!is_valid_cache(i)) | ||
893 | continue; | ||
894 | if (put_user(val | i, uindices)) | ||
895 | return -EFAULT; | ||
896 | uindices++; | ||
897 | } | ||
898 | return 0; | ||
899 | } | ||
900 | |||
901 | static u64 sys_reg_to_index(const struct sys_reg_desc *reg) | ||
902 | { | ||
903 | return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | | ||
904 | KVM_REG_ARM64_SYSREG | | ||
905 | (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | | ||
906 | (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | | ||
907 | (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | | ||
908 | (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | | ||
909 | (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); | ||
910 | } | ||
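
For example, the index userspace passes for MPIDR_EL1 (Op0=3, Op1=0, CRn=0,
CRm=0, Op2=5) comes out of the same recipe (sketch; the arm64 uapi headers
are assumed for the KVM_REG_* constants):

    #include <linux/kvm.h>
    #include <stdint.h>

    static uint64_t mpidr_el1_index(void)
    {
            return KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
                   (3 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
                   (0 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
                   (0 << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
                   (0 << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
                   (5 << KVM_REG_ARM64_SYSREG_OP2_SHIFT);
    }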
911 | |||
912 | static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) | ||
913 | { | ||
914 | if (!*uind) | ||
915 | return true; | ||
916 | |||
917 | if (put_user(sys_reg_to_index(reg), *uind)) | ||
918 | return false; | ||
919 | |||
920 | (*uind)++; | ||
921 | return true; | ||
922 | } | ||
923 | |||
924 | /* Assumed ordered tables, see kvm_sys_reg_table_init. */ | ||
925 | static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) | ||
926 | { | ||
927 | const struct sys_reg_desc *i1, *i2, *end1, *end2; | ||
928 | unsigned int total = 0; | ||
929 | size_t num; | ||
930 | |||
931 | /* We check for duplicates here, to allow arch-specific overrides. */ | ||
932 | i1 = get_target_table(vcpu->arch.target, true, &num); | ||
933 | end1 = i1 + num; | ||
934 | i2 = sys_reg_descs; | ||
935 | end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs); | ||
936 | |||
937 | BUG_ON(i1 == end1 || i2 == end2); | ||
938 | |||
939 | /* Walk carefully, as both tables may refer to the same register. */ | ||
940 | while (i1 || i2) { | ||
941 | int cmp = cmp_sys_reg(i1, i2); | ||
942 | /* target-specific overrides generic entry. */ | ||
943 | if (cmp <= 0) { | ||
944 | /* Ignore registers we trap but don't save. */ | ||
945 | if (i1->reg) { | ||
946 | if (!copy_reg_to_user(i1, &uind)) | ||
947 | return -EFAULT; | ||
948 | total++; | ||
949 | } | ||
950 | } else { | ||
951 | /* Ignore registers we trap but don't save. */ | ||
952 | if (i2->reg) { | ||
953 | if (!copy_reg_to_user(i2, &uind)) | ||
954 | return -EFAULT; | ||
955 | total++; | ||
956 | } | ||
957 | } | ||
958 | |||
959 | if (cmp <= 0 && ++i1 == end1) | ||
960 | i1 = NULL; | ||
961 | if (cmp >= 0 && ++i2 == end2) | ||
962 | i2 = NULL; | ||
963 | } | ||
964 | return total; | ||
965 | } | ||
966 | |||
967 | unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu) | ||
968 | { | ||
969 | return ARRAY_SIZE(invariant_sys_regs) | ||
970 | + num_demux_regs() | ||
971 | + walk_sys_regs(vcpu, (u64 __user *)NULL); | ||
972 | } | ||
973 | |||
974 | int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | ||
975 | { | ||
976 | unsigned int i; | ||
977 | int err; | ||
978 | |||
979 | /* Then give them all the invariant registers' indices. */ | ||
980 | for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) { | ||
981 | if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices)) | ||
982 | return -EFAULT; | ||
983 | uindices++; | ||
984 | } | ||
985 | |||
986 | err = walk_sys_regs(vcpu, uindices); | ||
987 | if (err < 0) | ||
988 | return err; | ||
989 | uindices += err; | ||
990 | |||
991 | return write_demux_regids(uindices); | ||
992 | } | ||
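
Userspace typically drives this pair through KVM_GET_REG_LIST with the
usual size-then-fill pattern (a sketch, error handling elided): the first
call fails with E2BIG but writes back the count that
kvm_arm_num_sys_reg_descs() feeds into the total.

    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    static struct kvm_reg_list *fetch_reg_list(int vcpu_fd)
    {
            struct kvm_reg_list probe = { .n = 0 };
            struct kvm_reg_list *list;

            ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);   /* sets probe.n */
            list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
            list->n = probe.n;
            ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
            return list;
    }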
993 | |||
994 | void kvm_sys_reg_table_init(void) | ||
995 | { | ||
996 | unsigned int i; | ||
997 | struct sys_reg_desc clidr; | ||
998 | |||
999 | /* Make sure tables are unique and in order. */ | ||
1000 | for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++) | ||
1001 | BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0); | ||
1002 | |||
1003 | /* We abuse the reset function to overwrite the table itself. */ | ||
1004 | for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) | ||
1005 | invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]); | ||
1006 | |||
1007 | /* | ||
1008 | * CLIDR format is awkward, so clean it up. See ARM B4.1.20: | ||
1009 | * | ||
1010 | * If software reads the Cache Type fields from Ctype1 | ||
1011 | * upwards, once it has seen a value of 0b000, no caches | ||
1012 | * exist at further-out levels of the hierarchy. So, for | ||
1013 | * example, if Ctype3 is the first Cache Type field with a | ||
1014 | * value of 0b000, the values of Ctype4 to Ctype7 must be | ||
1015 | * ignored. | ||
1016 | */ | ||
1017 | get_clidr_el1(NULL, &clidr); /* Ugly... */ | ||
1018 | cache_levels = clidr.val; | ||
1019 | for (i = 0; i < 7; i++) | ||
1020 | if (((cache_levels >> (i*3)) & 7) == 0) | ||
1021 | break; | ||
1022 | /* Clear all higher bits. */ | ||
1023 | cache_levels &= (1 << (i*3))-1; | ||
1024 | } | ||
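
The CLIDR trimming loop in isolation: stop at the first zero Ctype field
and mask away every level above it (a sketch over a plain u32):

    #include <stdint.h>

    static uint32_t trim_cache_levels(uint32_t clidr)
    {
            unsigned int i;

            for (i = 0; i < 7; i++)
                    if (((clidr >> (i * 3)) & 7) == 0)
                            break;
            return clidr & ((1u << (i * 3)) - 1);   /* 0 if L1 is absent */
    }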
1025 | |||
1026 | /** | ||
1027 | * kvm_reset_sys_regs - sets system registers to reset value | ||
1028 | * @vcpu: The VCPU pointer | ||
1029 | * | ||
1030 | * This function finds the right table above and sets the registers on the | ||
1031 | * virtual CPU struct to their architecturally defined reset values. | ||
1032 | */ | ||
1033 | void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) | ||
1034 | { | ||
1035 | size_t num; | ||
1036 | const struct sys_reg_desc *table; | ||
1037 | |||
1038 | /* Catch someone adding a register without putting in a reset entry. */ | ||
1039 | memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs)); | ||
1040 | |||
1041 | /* Generic chip reset first (so target could override). */ | ||
1042 | reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); | ||
1043 | |||
1044 | table = get_target_table(vcpu->arch.target, true, &num); | ||
1045 | reset_sys_reg_descs(vcpu, table, num); | ||
1046 | |||
1047 | for (num = 1; num < NR_SYS_REGS; num++) | ||
1048 | if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242) | ||
1049 | panic("Didn't reset vcpu_sys_reg(%zi)", num); | ||
1050 | } | ||
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h new file mode 100644 index 000000000000..d50d3722998e --- /dev/null +++ b/arch/arm64/kvm/sys_regs.h | |||
@@ -0,0 +1,138 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/kvm/coproc.h | ||
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
7 | * Authors: Christoffer Dall <c.dall@virtualopensystems.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License, version 2, as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | |||
22 | #ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__ | ||
23 | #define __ARM64_KVM_SYS_REGS_LOCAL_H__ | ||
24 | |||
25 | struct sys_reg_params { | ||
26 | u8 Op0; | ||
27 | u8 Op1; | ||
28 | u8 CRn; | ||
29 | u8 CRm; | ||
30 | u8 Op2; | ||
31 | u8 Rt; | ||
32 | bool is_write; | ||
33 | }; | ||
34 | |||
35 | struct sys_reg_desc { | ||
36 | /* MRS/MSR instruction which accesses it. */ | ||
37 | u8 Op0; | ||
38 | u8 Op1; | ||
39 | u8 CRn; | ||
40 | u8 CRm; | ||
41 | u8 Op2; | ||
42 | |||
43 | /* Trapped access from guest, if non-NULL. */ | ||
44 | bool (*access)(struct kvm_vcpu *, | ||
45 | const struct sys_reg_params *, | ||
46 | const struct sys_reg_desc *); | ||
47 | |||
48 | /* Initialization for vcpu. */ | ||
49 | void (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *); | ||
50 | |||
51 | /* Index into sys_reg[], or 0 if we don't need to save it. */ | ||
52 | int reg; | ||
53 | |||
54 | /* Value (usually reset value) */ | ||
55 | u64 val; | ||
56 | }; | ||
57 | |||
58 | static inline void print_sys_reg_instr(const struct sys_reg_params *p) | ||
59 | { | ||
60 | /* Look, we even formatted it for you to paste into the table! */ | ||
61 | kvm_pr_unimpl(" { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n", | ||
62 | p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read"); | ||
63 | } | ||
64 | |||
65 | static inline bool ignore_write(struct kvm_vcpu *vcpu, | ||
66 | const struct sys_reg_params *p) | ||
67 | { | ||
68 | return true; | ||
69 | } | ||
70 | |||
71 | static inline bool read_zero(struct kvm_vcpu *vcpu, | ||
72 | const struct sys_reg_params *p) | ||
73 | { | ||
74 | *vcpu_reg(vcpu, p->Rt) = 0; | ||
75 | return true; | ||
76 | } | ||
77 | |||
78 | static inline bool write_to_read_only(struct kvm_vcpu *vcpu, | ||
79 | const struct sys_reg_params *params) | ||
80 | { | ||
81 | kvm_debug("sys_reg write to read-only register at: %lx\n", | ||
82 | *vcpu_pc(vcpu)); | ||
83 | print_sys_reg_instr(params); | ||
84 | return false; | ||
85 | } | ||
86 | |||
87 | static inline bool read_from_write_only(struct kvm_vcpu *vcpu, | ||
88 | const struct sys_reg_params *params) | ||
89 | { | ||
90 | kvm_debug("sys_reg read to write-only register at: %lx\n", | ||
91 | *vcpu_pc(vcpu)); | ||
92 | print_sys_reg_instr(params); | ||
93 | return false; | ||
94 | } | ||
95 | |||
96 | /* Reset functions */ | ||
97 | static inline void reset_unknown(struct kvm_vcpu *vcpu, | ||
98 | const struct sys_reg_desc *r) | ||
99 | { | ||
100 | BUG_ON(!r->reg); | ||
101 | BUG_ON(r->reg >= NR_SYS_REGS); | ||
102 | vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL; | ||
103 | } | ||
104 | |||
105 | static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) | ||
106 | { | ||
107 | BUG_ON(!r->reg); | ||
108 | BUG_ON(r->reg >= NR_SYS_REGS); | ||
109 | vcpu_sys_reg(vcpu, r->reg) = r->val; | ||
110 | } | ||
111 | |||
112 | static inline int cmp_sys_reg(const struct sys_reg_desc *i1, | ||
113 | const struct sys_reg_desc *i2) | ||
114 | { | ||
115 | BUG_ON(i1 == i2); | ||
116 | if (!i1) | ||
117 | return 1; | ||
118 | else if (!i2) | ||
119 | return -1; | ||
120 | if (i1->Op0 != i2->Op0) | ||
121 | return i1->Op0 - i2->Op0; | ||
122 | if (i1->Op1 != i2->Op1) | ||
123 | return i1->Op1 - i2->Op1; | ||
124 | if (i1->CRn != i2->CRn) | ||
125 | return i1->CRn - i2->CRn; | ||
126 | if (i1->CRm != i2->CRm) | ||
127 | return i1->CRm - i2->CRm; | ||
128 | return i1->Op2 - i2->Op2; | ||
129 | } | ||
130 | |||
131 | |||
132 | #define Op0(_x) .Op0 = _x | ||
133 | #define Op1(_x) .Op1 = _x | ||
134 | #define CRn(_x) .CRn = _x | ||
135 | #define CRm(_x) .CRm = _x | ||
136 | #define Op2(_x) .Op2 = _x | ||
137 | |||
138 | #endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */ | ||
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c new file mode 100644 index 000000000000..4268ab9356b1 --- /dev/null +++ b/arch/arm64/kvm/sys_regs_generic_v8.c | |||
@@ -0,0 +1,95 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Based on arch/arm/kvm/coproc_a15.c: | ||
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
7 | * Authors: Rusty Russell <rusty@rustcorp.com.au> | ||
8 | * Christoffer Dall <c.dall@virtualopensystems.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License, version 2, as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
21 | */ | ||
22 | #include <linux/kvm_host.h> | ||
23 | #include <asm/cputype.h> | ||
24 | #include <asm/kvm_arm.h> | ||
25 | #include <asm/kvm_asm.h> | ||
26 | #include <asm/kvm_host.h> | ||
27 | #include <asm/kvm_emulate.h> | ||
28 | #include <asm/kvm_coproc.h> | ||
29 | #include <linux/init.h> | ||
30 | |||
31 | #include "sys_regs.h" | ||
32 | |||
33 | static bool access_actlr(struct kvm_vcpu *vcpu, | ||
34 | const struct sys_reg_params *p, | ||
35 | const struct sys_reg_desc *r) | ||
36 | { | ||
37 | if (p->is_write) | ||
38 | return ignore_write(vcpu, p); | ||
39 | |||
40 | *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1); | ||
41 | return true; | ||
42 | } | ||
43 | |||
44 | static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) | ||
45 | { | ||
46 | u64 actlr; | ||
47 | |||
48 | asm volatile("mrs %0, actlr_el1\n" : "=r" (actlr)); | ||
49 | vcpu_sys_reg(vcpu, ACTLR_EL1) = actlr; | ||
50 | } | ||
51 | |||
52 | /* | ||
53 | * Implementation specific sys-reg registers. | ||
54 | * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 | ||
55 | */ | ||
56 | static const struct sys_reg_desc genericv8_sys_regs[] = { | ||
57 | /* ACTLR_EL1 */ | ||
58 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001), | ||
59 | access_actlr, reset_actlr, ACTLR_EL1 }, | ||
60 | }; | ||
61 | |||
62 | static const struct sys_reg_desc genericv8_cp15_regs[] = { | ||
63 | /* ACTLR */ | ||
64 | { Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001), | ||
65 | access_actlr }, | ||
66 | }; | ||
67 | |||
68 | static struct kvm_sys_reg_target_table genericv8_target_table = { | ||
69 | .table64 = { | ||
70 | .table = genericv8_sys_regs, | ||
71 | .num = ARRAY_SIZE(genericv8_sys_regs), | ||
72 | }, | ||
73 | .table32 = { | ||
74 | .table = genericv8_cp15_regs, | ||
75 | .num = ARRAY_SIZE(genericv8_cp15_regs), | ||
76 | }, | ||
77 | }; | ||
78 | |||
79 | static int __init sys_reg_genericv8_init(void) | ||
80 | { | ||
81 | unsigned int i; | ||
82 | |||
83 | for (i = 1; i < ARRAY_SIZE(genericv8_sys_regs); i++) | ||
84 | BUG_ON(cmp_sys_reg(&genericv8_sys_regs[i-1], | ||
85 | &genericv8_sys_regs[i]) >= 0); | ||
86 | |||
87 | kvm_register_target_sys_reg_table(KVM_ARM_TARGET_AEM_V8, | ||
88 | &genericv8_target_table); | ||
89 | kvm_register_target_sys_reg_table(KVM_ARM_TARGET_FOUNDATION_V8, | ||
90 | &genericv8_target_table); | ||
91 | kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A57, | ||
92 | &genericv8_target_table); | ||
93 | return 0; | ||
94 | } | ||
95 | late_initcall(sys_reg_genericv8_init); | ||
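access_actlr() and reset_actlr() together implement a common trap-emulation pattern: snapshot the host register once at vcpu reset, serve guest reads from the snapshot, and accept-but-discard guest writes. A self-contained toy of that pattern follows; every name in it is made up for illustration, none of this is kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_vcpu {
        uint64_t shadow_actlr;  /* snapshot taken at vcpu reset */
        uint64_t regs[31];      /* guest general-purpose registers */
};

/* reset_actlr() analogue: capture the host value once */
static void toy_reset(struct toy_vcpu *vcpu, uint64_t host_actlr)
{
        vcpu->shadow_actlr = host_actlr;
}

/* access_actlr() analogue: reads hit the shadow, writes are dropped */
static bool toy_access(struct toy_vcpu *vcpu, int rt, bool is_write,
                       uint64_t wval)
{
        if (is_write) {
                (void)wval;     /* ignore_write(): succeed, change nothing */
                return true;
        }
        vcpu->regs[rt] = vcpu->shadow_actlr;
        return true;
}

int main(void)
{
        struct toy_vcpu v = { 0 };

        toy_reset(&v, 0x123);
        toy_access(&v, 0, true, 0xdead);        /* guest write: dropped */
        toy_access(&v, 0, false, 0);            /* guest read: from shadow */
        printf("guest x0 = 0x%llx\n", (unsigned long long)v.regs[0]);
        return 0;
}

Note also that one target table serves three targets here: the AEM and Foundation models and Cortex-A57 all register the same generic-v8 table, so only genuinely implementation-specific CPUs need their own.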
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index 3140a2abcdc2..b51d36401d83 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile | |||
@@ -2,3 +2,4 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ | |||
2 | cache.o copypage.o flush.o \ | 2 | cache.o copypage.o flush.o \ |
3 | ioremap.o mmap.o pgd.o mmu.o \ | 3 | ioremap.o mmap.o pgd.o mmu.o \ |
4 | context.o tlb.o proc.o | 4 | context.o tlb.o proc.o |
5 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 1426468b77f3..0ecac8980aae 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
@@ -365,17 +365,6 @@ static int __kprobes do_translation_fault(unsigned long addr, | |||
365 | } | 365 | } |
366 | 366 | ||
367 | /* | 367 | /* |
368 | * Some section permission faults need to be handled gracefully. They can | ||
369 | * happen due to a __{get,put}_user during an oops. | ||
370 | */ | ||
371 | static int do_sect_fault(unsigned long addr, unsigned int esr, | ||
372 | struct pt_regs *regs) | ||
373 | { | ||
374 | do_bad_area(addr, esr, regs); | ||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | /* | ||
379 | * This abort handler always returns "fault". | 368 | * This abort handler always returns "fault". |
380 | */ | 369 | */ |
381 | static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) | 370 | static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) |
@@ -398,12 +387,12 @@ static struct fault_info { | |||
398 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, | 387 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, |
399 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, | 388 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, |
400 | { do_bad, SIGBUS, 0, "reserved access flag fault" }, | 389 | { do_bad, SIGBUS, 0, "reserved access flag fault" }, |
401 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, | 390 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, |
402 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, | 391 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, |
403 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, | 392 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, |
404 | { do_bad, SIGBUS, 0, "reserved permission fault" }, | 393 | { do_bad, SIGBUS, 0, "reserved permission fault" }, |
405 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, | 394 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, |
406 | { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, | 395 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, |
407 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, | 396 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, |
408 | { do_bad, SIGBUS, 0, "synchronous external abort" }, | 397 | { do_bad, SIGBUS, 0, "synchronous external abort" }, |
409 | { do_bad, SIGBUS, 0, "asynchronous external abort" }, | 398 | { do_bad, SIGBUS, 0, "asynchronous external abort" }, |
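This fault.c change matters for the huge-page work elsewhere in the series: block mappings at translation levels 1 and 2 can now take access-flag and permission faults that must reach the real page-fault handler instead of dying in do_bad() or the removed do_sect_fault(). The dispatch idiom itself, a fault-status-indexed table of handlers, looks like this in miniature (toy types, not the kernel's):

#include <stdio.h>

static int do_page_fault(unsigned long addr)
{
        printf("handling page fault at %#lx\n", addr);
        return 0;
}

static int do_bad(unsigned long addr)
{
        printf("unrecoverable fault at %#lx\n", addr);
        return 1;
}

static const struct {
        int (*fn)(unsigned long addr);
        const char *name;
} fault_info[] = {
        { do_bad,        "reserved permission fault" },
        { do_page_fault, "level 1 permission fault" }, /* was do_bad */
        { do_page_fault, "level 2 permission fault" }, /* was do_sect_fault */
        { do_page_fault, "level 3 permission fault" },
};

int main(void)
{
        unsigned long fsc = 2;  /* decoded from the ESR in the real code */
        return fault_info[fsc].fn(0x7f0000001000UL);
}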
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 88611c3a421a..e4193e3adc7f 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c | |||
@@ -70,23 +70,16 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | |||
70 | #endif | 70 | #endif |
71 | } | 71 | } |
72 | 72 | ||
73 | void __flush_dcache_page(struct page *page) | ||
74 | { | ||
75 | __flush_dcache_area(page_address(page), PAGE_SIZE); | ||
76 | } | ||
77 | |||
78 | void __sync_icache_dcache(pte_t pte, unsigned long addr) | 73 | void __sync_icache_dcache(pte_t pte, unsigned long addr) |
79 | { | 74 | { |
80 | unsigned long pfn; | 75 | struct page *page = pte_page(pte); |
81 | struct page *page; | ||
82 | 76 | ||
83 | pfn = pte_pfn(pte); | 77 | /* no flushing needed for anonymous pages */ |
84 | if (!pfn_valid(pfn)) | 78 | if (!page_mapping(page)) |
85 | return; | 79 | return; |
86 | 80 | ||
87 | page = pfn_to_page(pfn); | ||
88 | if (!test_and_set_bit(PG_dcache_clean, &page->flags)) { | 81 | if (!test_and_set_bit(PG_dcache_clean, &page->flags)) { |
89 | __flush_dcache_page(page); | 82 | __flush_dcache_area(page_address(page), PAGE_SIZE); |
90 | __flush_icache_all(); | 83 | __flush_icache_all(); |
91 | } else if (icache_is_aivivt()) { | 84 | } else if (icache_is_aivivt()) { |
92 | __flush_icache_all(); | 85 | __flush_icache_all(); |
@@ -94,28 +87,14 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr) | |||
94 | } | 87 | } |
95 | 88 | ||
96 | /* | 89 | /* |
97 | * Ensure cache coherency between kernel mapping and userspace mapping of this | 90 | * This function is called when a page has been modified by the kernel. Mark |
98 | * page. | 91 | * it as dirty for later flushing when mapped in user space (if executable, |
92 | * see __sync_icache_dcache). | ||
99 | */ | 93 | */ |
100 | void flush_dcache_page(struct page *page) | 94 | void flush_dcache_page(struct page *page) |
101 | { | 95 | { |
102 | struct address_space *mapping; | 96 | if (test_bit(PG_dcache_clean, &page->flags)) |
103 | |||
104 | /* | ||
105 | * The zero page is never written to, so never has any dirty cache | ||
106 | * lines, and therefore never needs to be flushed. | ||
107 | */ | ||
108 | if (page == ZERO_PAGE(0)) | ||
109 | return; | ||
110 | |||
111 | mapping = page_mapping(page); | ||
112 | if (mapping && mapping_mapped(mapping)) { | ||
113 | __flush_dcache_page(page); | ||
114 | __flush_icache_all(); | ||
115 | set_bit(PG_dcache_clean, &page->flags); | ||
116 | } else { | ||
117 | clear_bit(PG_dcache_clean, &page->flags); | 97 | clear_bit(PG_dcache_clean, &page->flags); |
118 | } | ||
119 | } | 98 | } |
120 | EXPORT_SYMBOL(flush_dcache_page); | 99 | EXPORT_SYMBOL(flush_dcache_page); |
121 | 100 | ||
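The rewritten flush.c implements a deferred-flush protocol: flush_dcache_page() no longer flushes anything, it only clears PG_dcache_clean, and the actual D-cache clean plus I-cache invalidate happens in __sync_icache_dcache() when the page is first mapped into userspace (anonymous pages, which have no mapping, are skipped entirely). The test-and-set on the flag makes the flush happen exactly once even with concurrent mappers. A standalone toy of the protocol, with made-up names standing in for the page flag machinery:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
        atomic_bool dcache_clean;       /* stand-in for PG_dcache_clean */
};

static void flush_hw_caches(void)
{
        puts("D-cache clean + I-cache invalidate");
}

/* flush_dcache_page() analogue: just mark the page dirty */
static void mark_dirty(struct toy_page *p)
{
        atomic_store(&p->dcache_clean, false);
}

/* __sync_icache_dcache() analogue: flush once, on the dirty->clean edge */
static void sync_on_map(struct toy_page *p)
{
        /* like test_and_set_bit(): only one mapper sees "was dirty" */
        if (!atomic_exchange(&p->dcache_clean, true))
                flush_hw_caches();
}

int main(void)
{
        struct toy_page p = { true };

        mark_dirty(&p);         /* kernel modified the page */
        sync_on_map(&p);        /* first executable user mapping: flushes */
        sync_on_map(&p);        /* later mappings: no-op */
        return 0;
}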
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c new file mode 100644 index 000000000000..2fc8258bab2d --- /dev/null +++ b/arch/arm64/mm/hugetlbpage.c | |||
@@ -0,0 +1,70 @@ | |||
1 | /* | ||
2 | * arch/arm64/mm/hugetlbpage.c | ||
3 | * | ||
4 | * Copyright (C) 2013 Linaro Ltd. | ||
5 | * | ||
6 | * Based on arch/x86/mm/hugetlbpage.c. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | ||
21 | |||
22 | #include <linux/init.h> | ||
23 | #include <linux/fs.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/hugetlb.h> | ||
26 | #include <linux/pagemap.h> | ||
27 | #include <linux/err.h> | ||
28 | #include <linux/sysctl.h> | ||
29 | #include <asm/mman.h> | ||
30 | #include <asm/tlb.h> | ||
31 | #include <asm/tlbflush.h> | ||
32 | #include <asm/pgalloc.h> | ||
33 | |||
34 | #ifndef CONFIG_ARCH_WANT_HUGE_PMD_SHARE | ||
35 | int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) | ||
36 | { | ||
37 | return 0; | ||
38 | } | ||
39 | #endif | ||
40 | |||
41 | struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, | ||
42 | int write) | ||
43 | { | ||
44 | return ERR_PTR(-EINVAL); | ||
45 | } | ||
46 | |||
47 | int pmd_huge(pmd_t pmd) | ||
48 | { | ||
49 | return !(pmd_val(pmd) & PMD_TABLE_BIT); | ||
50 | } | ||
51 | |||
52 | int pud_huge(pud_t pud) | ||
53 | { | ||
54 | return !(pud_val(pud) & PUD_TABLE_BIT); | ||
55 | } | ||
56 | |||
57 | static __init int setup_hugepagesz(char *opt) | ||
58 | { | ||
59 | unsigned long ps = memparse(opt, &opt); | ||
60 | if (ps == PMD_SIZE) { | ||
61 | hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); | ||
62 | } else if (ps == PUD_SIZE) { | ||
63 | hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); | ||
64 | } else { | ||
65 | pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20); | ||
66 | return 0; | ||
67 | } | ||
68 | return 1; | ||
69 | } | ||
70 | __setup("hugepagesz=", setup_hugepagesz); | ||
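The __setup() hook makes the supported sizes selectable on the kernel command line; with 4K base pages, PMD_SIZE is 2M and PUD_SIZE is 1G, so a boot line would look like (hugepages= being the separate pool-count parameter):

        hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=64

memparse() is what accepts the K/M/G size suffixes. A rough userspace approximation of its behaviour, for illustration only:

#include <stdio.h>
#include <stdlib.h>

/* simplified stand-in for the kernel's memparse() */
static unsigned long my_memparse(const char *s, char **endp)
{
        unsigned long val = strtoul(s, endp, 0);

        switch (**endp) {
        case 'G': case 'g':
                val <<= 10;     /* fall through */
        case 'M': case 'm':
                val <<= 10;     /* fall through */
        case 'K': case 'k':
                val <<= 10;
                (*endp)++;
        }
        return val;
}

int main(void)
{
        char *end;

        printf("2M -> %lu bytes\n", my_memparse("2M", &end));
        printf("1G -> %lu bytes\n", my_memparse("1G", &end));
        return 0;
}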
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h index 916701e6d040..d519f4f50c8c 100644 --- a/arch/arm64/mm/mm.h +++ b/arch/arm64/mm/mm.h | |||
@@ -1,3 +1,2 @@ | |||
1 | extern void __flush_dcache_page(struct page *page); | ||
2 | extern void __init bootmem_init(void); | 1 | extern void __init bootmem_init(void); |
3 | extern void __init arm64_swiotlb_init(void); | 2 | extern void __init arm64_swiotlb_init(void); |
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index eeecc9c8ed68..a8d1059b91b2 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -297,6 +297,16 @@ static void __init map_mem(void) | |||
297 | { | 297 | { |
298 | struct memblock_region *reg; | 298 | struct memblock_region *reg; |
299 | 299 | ||
300 | /* | ||
301 | * Temporarily limit the memblock range. We need to do this as | ||
302 | * create_mapping requires puds, pmds and ptes to be allocated from | ||
303 | * memory addressable from the initial direct kernel mapping. | ||
304 | * | ||
305 | * The initial direct kernel mapping, located at swapper_pg_dir, | ||
306 | * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (aligned). | ||
307 | */ | ||
308 | memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE); | ||
309 | |||
300 | /* map all the memory banks */ | 310 | /* map all the memory banks */ |
301 | for_each_memblock(memory, reg) { | 311 | for_each_memblock(memory, reg) { |
302 | phys_addr_t start = reg->base; | 312 | phys_addr_t start = reg->base; |
@@ -307,6 +317,9 @@ static void __init map_mem(void) | |||
307 | 317 | ||
308 | create_mapping(start, __phys_to_virt(start), end - start); | 318 | create_mapping(start, __phys_to_virt(start), end - start); |
309 | } | 319 | } |
320 | |||
321 | /* Limit no longer required. */ | ||
322 | memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); | ||
310 | } | 323 | } |
311 | 324 | ||
312 | /* | 325 | /* |
@@ -317,12 +330,6 @@ void __init paging_init(void) | |||
317 | { | 330 | { |
318 | void *zero_page; | 331 | void *zero_page; |
319 | 332 | ||
320 | /* | ||
321 | * Maximum PGDIR_SIZE addressable via the initial direct kernel | ||
322 | * mapping in swapper_pg_dir. | ||
323 | */ | ||
324 | memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE); | ||
325 | |||
326 | init_mem_pgprot(); | 333 | init_mem_pgprot(); |
327 | map_mem(); | 334 | map_mem(); |
328 | 335 | ||
@@ -339,7 +346,6 @@ void __init paging_init(void) | |||
339 | bootmem_init(); | 346 | bootmem_init(); |
340 | 347 | ||
341 | empty_zero_page = virt_to_page(zero_page); | 348 | empty_zero_page = virt_to_page(zero_page); |
342 | __flush_dcache_page(empty_zero_page); | ||
343 | 349 | ||
344 | /* | 350 | /* |
345 | * TTBR0 is only used for the identity mapping at this stage. Make it | 351 | * TTBR0 is only used for the identity mapping at this stage. Make it |
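The mmu.c change is a bracketing pattern: the allocation ceiling is clamped only for the window in which page-table pages must come from memory the initial mapping can already reach, and lifted as soon as the full linear map exists, instead of leaving the limit set for the rest of paging_init(). Reduced to a standalone toy (the limit variable and helpers are invented for illustration):

#include <stdio.h>

#define ALLOC_ANYWHERE (~0UL)

static unsigned long alloc_limit = ALLOC_ANYWHERE;      /* toy memblock limit */

static void set_current_limit(unsigned long limit)
{
        alloc_limit = limit;
}

static void create_mappings(void)
{
        /* page-table pages allocated here must lie below alloc_limit */
        printf("mapping memory, allocations capped at %#lx\n", alloc_limit);
}

int main(void)
{
        set_current_limit(1UL << 30);   /* stand-in for the PGDIR window */
        create_mappings();
        set_current_limit(ALLOC_ANYWHERE);      /* limit no longer required */
        return 0;
}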
diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile new file mode 100644 index 000000000000..be240404ba96 --- /dev/null +++ b/arch/arm64/xen/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o) | ||
2 | obj-y := xen-arm.o hypercall.o | ||
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S new file mode 100644 index 000000000000..2816c479cd49 --- /dev/null +++ b/arch/arm64/xen/hypercall.S | |||
@@ -0,0 +1,92 @@ | |||
1 | /****************************************************************************** | ||
2 | * hypercall.S | ||
3 | * | ||
4 | * Xen hypercall wrappers | ||
5 | * | ||
6 | * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version 2 | ||
10 | * as published by the Free Software Foundation; or, when distributed | ||
11 | * separately from the Linux kernel or incorporated into other | ||
12 | * software packages, subject to the following license: | ||
13 | * | ||
14 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | * of this source file (the "Software"), to deal in the Software without | ||
16 | * restriction, including without limitation the rights to use, copy, modify, | ||
17 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
18 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
19 | * the following conditions: | ||
20 | * | ||
21 | * The above copyright notice and this permission notice shall be included in | ||
22 | * all copies or substantial portions of the Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
30 | * IN THE SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | /* | ||
34 | * The Xen hypercall calling convention is very similar to the procedure | ||
35 | * call standard for the ARM 64-bit architecture: the first parameter is | ||
36 | * passed in x0, the second in x1, the third in x2, the fourth in x3 and | ||
37 | * the fifth in x4. | ||
38 | * | ||
39 | * The hypercall number is passed in x16. | ||
40 | * | ||
41 | * The return value is in x0. | ||
42 | * | ||
43 | * The hvc ISS is required to be 0xEA1, that is the Xen specific ARM | ||
44 | * hypercall tag. | ||
45 | * | ||
46 | * Parameter structs passed to hypercalls are laid out according to | ||
47 | * the ARM 64-bit EABI standard. | ||
48 | */ | ||
49 | |||
50 | #include <linux/linkage.h> | ||
51 | #include <asm/assembler.h> | ||
52 | #include <xen/interface/xen.h> | ||
53 | |||
54 | |||
55 | #define XEN_IMM 0xEA1 | ||
56 | |||
57 | #define HYPERCALL_SIMPLE(hypercall) \ | ||
58 | ENTRY(HYPERVISOR_##hypercall) \ | ||
59 | mov x16, #__HYPERVISOR_##hypercall; \ | ||
60 | hvc XEN_IMM; \ | ||
61 | ret; \ | ||
62 | ENDPROC(HYPERVISOR_##hypercall) | ||
63 | |||
64 | #define HYPERCALL0 HYPERCALL_SIMPLE | ||
65 | #define HYPERCALL1 HYPERCALL_SIMPLE | ||
66 | #define HYPERCALL2 HYPERCALL_SIMPLE | ||
67 | #define HYPERCALL3 HYPERCALL_SIMPLE | ||
68 | #define HYPERCALL4 HYPERCALL_SIMPLE | ||
69 | #define HYPERCALL5 HYPERCALL_SIMPLE | ||
70 | |||
71 | .text | ||
72 | |||
73 | HYPERCALL2(xen_version); | ||
74 | HYPERCALL3(console_io); | ||
75 | HYPERCALL3(grant_table_op); | ||
76 | HYPERCALL2(sched_op); | ||
77 | HYPERCALL2(event_channel_op); | ||
78 | HYPERCALL2(hvm_op); | ||
79 | HYPERCALL2(memory_op); | ||
80 | HYPERCALL2(physdev_op); | ||
81 | HYPERCALL3(vcpu_op); | ||
82 | |||
83 | ENTRY(privcmd_call) | ||
84 | mov x16, x0 | ||
85 | mov x0, x1 | ||
86 | mov x1, x2 | ||
87 | mov x2, x3 | ||
88 | mov x3, x4 | ||
89 | mov x4, x5 | ||
90 | hvc XEN_IMM | ||
91 | ret | ||
92 | ENDPROC(privcmd_call); | ||
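For reference, a sketch of the C declarations these stubs satisfy; the real prototypes live in the ARM Xen hypercall header shared with 32-bit and may differ in detail, so treat the signatures below as assumptions:

/* each HYPERCALL_SIMPLE stub backs a declaration of this shape */
int HYPERVISOR_xen_version(int cmd, void *arg);
int HYPERVISOR_console_io(int cmd, int count, char *str);
int HYPERVISOR_sched_op(int cmd, void *arg);

/*
 * privcmd_call(call, a1..a5): the stub moves the hypercall number from
 * x0 into x16 and slides each argument down one register before issuing
 * hvc #0xEA1, so an arbitrary hypercall can be made from C.
 */
long privcmd_call(unsigned call, unsigned long a1, unsigned long a2,
                  unsigned long a3, unsigned long a4, unsigned long a5);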
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d98b665e6536..b094816a7e0f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -207,6 +207,12 @@ config ARCH_HIBERNATION_POSSIBLE | |||
207 | config ARCH_SUSPEND_POSSIBLE | 207 | config ARCH_SUSPEND_POSSIBLE |
208 | def_bool y | 208 | def_bool y |
209 | 209 | ||
210 | config ARCH_WANT_HUGE_PMD_SHARE | ||
211 | def_bool y | ||
212 | |||
213 | config ARCH_WANT_GENERAL_HUGETLB | ||
214 | def_bool y | ||
215 | |||
210 | config ZONE_DMA32 | 216 | config ZONE_DMA32 |
211 | bool | 217 | bool |
212 | default X86_64 | 218 | default X86_64 |
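These two symbols are the opt-in switch for the hugetlb code being made generic: an architecture that wants the shared implementation declares them in its own Kconfig rather than carrying private copies. The arm64 entries added elsewhere in this series take essentially this form (a sketch, not the verbatim arm64 hunk; the exact conditions, such as depending on the base page size, may differ):

config ARCH_WANT_GENERAL_HUGETLB
        def_bool y

config ARCH_WANT_HUGE_PMD_SHARE
        def_bool y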
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index ae1aa71d0115..7e73e8c69096 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c | |||
@@ -16,169 +16,6 @@ | |||
16 | #include <asm/tlbflush.h> | 16 | #include <asm/tlbflush.h> |
17 | #include <asm/pgalloc.h> | 17 | #include <asm/pgalloc.h> |
18 | 18 | ||
19 | static unsigned long page_table_shareable(struct vm_area_struct *svma, | ||
20 | struct vm_area_struct *vma, | ||
21 | unsigned long addr, pgoff_t idx) | ||
22 | { | ||
23 | unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + | ||
24 | svma->vm_start; | ||
25 | unsigned long sbase = saddr & PUD_MASK; | ||
26 | unsigned long s_end = sbase + PUD_SIZE; | ||
27 | |||
28 | /* Allow segments to share if only one is marked locked */ | ||
29 | unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED; | ||
30 | unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED; | ||
31 | |||
32 | /* | ||
33 | * match the virtual addresses, permission and the alignment of the | ||
34 | * page table page. | ||
35 | */ | ||
36 | if (pmd_index(addr) != pmd_index(saddr) || | ||
37 | vm_flags != svm_flags || | ||
38 | sbase < svma->vm_start || svma->vm_end < s_end) | ||
39 | return 0; | ||
40 | |||
41 | return saddr; | ||
42 | } | ||
43 | |||
44 | static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) | ||
45 | { | ||
46 | unsigned long base = addr & PUD_MASK; | ||
47 | unsigned long end = base + PUD_SIZE; | ||
48 | |||
49 | /* | ||
50 | * check on proper vm_flags and page table alignment | ||
51 | */ | ||
52 | if (vma->vm_flags & VM_MAYSHARE && | ||
53 | vma->vm_start <= base && end <= vma->vm_end) | ||
54 | return 1; | ||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | /* | ||
59 | * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() | ||
60 | * and returns the corresponding pte. While this is not necessary for the | ||
61 | * !shared pmd case because we can allocate the pmd later as well, it makes the | ||
62 | * code much cleaner. pmd allocation is essential for the shared case because | ||
63 | * pud has to be populated inside the same i_mmap_mutex section - otherwise | ||
64 | * racing tasks could either miss the sharing (see huge_pte_offset) or select a | ||
65 | * bad pmd for sharing. | ||
66 | */ | ||
67 | static pte_t * | ||
68 | huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) | ||
69 | { | ||
70 | struct vm_area_struct *vma = find_vma(mm, addr); | ||
71 | struct address_space *mapping = vma->vm_file->f_mapping; | ||
72 | pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + | ||
73 | vma->vm_pgoff; | ||
74 | struct vm_area_struct *svma; | ||
75 | unsigned long saddr; | ||
76 | pte_t *spte = NULL; | ||
77 | pte_t *pte; | ||
78 | |||
79 | if (!vma_shareable(vma, addr)) | ||
80 | return (pte_t *)pmd_alloc(mm, pud, addr); | ||
81 | |||
82 | mutex_lock(&mapping->i_mmap_mutex); | ||
83 | vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { | ||
84 | if (svma == vma) | ||
85 | continue; | ||
86 | |||
87 | saddr = page_table_shareable(svma, vma, addr, idx); | ||
88 | if (saddr) { | ||
89 | spte = huge_pte_offset(svma->vm_mm, saddr); | ||
90 | if (spte) { | ||
91 | get_page(virt_to_page(spte)); | ||
92 | break; | ||
93 | } | ||
94 | } | ||
95 | } | ||
96 | |||
97 | if (!spte) | ||
98 | goto out; | ||
99 | |||
100 | spin_lock(&mm->page_table_lock); | ||
101 | if (pud_none(*pud)) | ||
102 | pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK)); | ||
103 | else | ||
104 | put_page(virt_to_page(spte)); | ||
105 | spin_unlock(&mm->page_table_lock); | ||
106 | out: | ||
107 | pte = (pte_t *)pmd_alloc(mm, pud, addr); | ||
108 | mutex_unlock(&mapping->i_mmap_mutex); | ||
109 | return pte; | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * unmap huge page backed by shared pte. | ||
114 | * | ||
115 | * Hugetlb pte page is ref counted at the time of mapping. If pte is shared | ||
116 | * indicated by page_count > 1, unmap is achieved by clearing pud and | ||
117 | * decrementing the ref count. If count == 1, the pte page is not shared. | ||
118 | * | ||
119 | * called with vma->vm_mm->page_table_lock held. | ||
120 | * | ||
121 | * returns: 1 successfully unmapped a shared pte page | ||
122 | * 0 the underlying pte page is not shared, or it is the last user | ||
123 | */ | ||
124 | int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) | ||
125 | { | ||
126 | pgd_t *pgd = pgd_offset(mm, *addr); | ||
127 | pud_t *pud = pud_offset(pgd, *addr); | ||
128 | |||
129 | BUG_ON(page_count(virt_to_page(ptep)) == 0); | ||
130 | if (page_count(virt_to_page(ptep)) == 1) | ||
131 | return 0; | ||
132 | |||
133 | pud_clear(pud); | ||
134 | put_page(virt_to_page(ptep)); | ||
135 | *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE; | ||
136 | return 1; | ||
137 | } | ||
138 | |||
139 | pte_t *huge_pte_alloc(struct mm_struct *mm, | ||
140 | unsigned long addr, unsigned long sz) | ||
141 | { | ||
142 | pgd_t *pgd; | ||
143 | pud_t *pud; | ||
144 | pte_t *pte = NULL; | ||
145 | |||
146 | pgd = pgd_offset(mm, addr); | ||
147 | pud = pud_alloc(mm, pgd, addr); | ||
148 | if (pud) { | ||
149 | if (sz == PUD_SIZE) { | ||
150 | pte = (pte_t *)pud; | ||
151 | } else { | ||
152 | BUG_ON(sz != PMD_SIZE); | ||
153 | if (pud_none(*pud)) | ||
154 | pte = huge_pmd_share(mm, addr, pud); | ||
155 | else | ||
156 | pte = (pte_t *)pmd_alloc(mm, pud, addr); | ||
157 | } | ||
158 | } | ||
159 | BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); | ||
160 | |||
161 | return pte; | ||
162 | } | ||
163 | |||
164 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | ||
165 | { | ||
166 | pgd_t *pgd; | ||
167 | pud_t *pud; | ||
168 | pmd_t *pmd = NULL; | ||
169 | |||
170 | pgd = pgd_offset(mm, addr); | ||
171 | if (pgd_present(*pgd)) { | ||
172 | pud = pud_offset(pgd, addr); | ||
173 | if (pud_present(*pud)) { | ||
174 | if (pud_large(*pud)) | ||
175 | return (pte_t *)pud; | ||
176 | pmd = pmd_offset(pud, addr); | ||
177 | } | ||
178 | } | ||
179 | return (pte_t *) pmd; | ||
180 | } | ||
181 | |||
182 | #if 0 /* This is just for testing */ | 19 | #if 0 /* This is just for testing */ |
183 | struct page * | 20 | struct page * |
184 | follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) | 21 | follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) |
@@ -240,30 +77,6 @@ int pud_huge(pud_t pud) | |||
240 | return !!(pud_val(pud) & _PAGE_PSE); | 77 | return !!(pud_val(pud) & _PAGE_PSE); |
241 | } | 78 | } |
242 | 79 | ||
243 | struct page * | ||
244 | follow_huge_pmd(struct mm_struct *mm, unsigned long address, | ||
245 | pmd_t *pmd, int write) | ||
246 | { | ||
247 | struct page *page; | ||
248 | |||
249 | page = pte_page(*(pte_t *)pmd); | ||
250 | if (page) | ||
251 | page += ((address & ~PMD_MASK) >> PAGE_SHIFT); | ||
252 | return page; | ||
253 | } | ||
254 | |||
255 | struct page * | ||
256 | follow_huge_pud(struct mm_struct *mm, unsigned long address, | ||
257 | pud_t *pud, int write) | ||
258 | { | ||
259 | struct page *page; | ||
260 | |||
261 | page = pte_page(*(pte_t *)pud); | ||
262 | if (page) | ||
263 | page += ((address & ~PUD_MASK) >> PAGE_SHIFT); | ||
264 | return page; | ||
265 | } | ||
266 | |||
267 | #endif | 80 | #endif |
268 | 81 | ||
269 | /* x86_64 also uses this file */ | 82 | /* x86_64 also uses this file */ |
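Nothing in this deletion is lost: the removed helpers were copied, earlier in the same series, into mm/hugetlb.c behind the two opt-in symbols added above, so any architecture that sets them picks the code up unchanged. The resulting structure in mm/hugetlb.c is roughly the following (a shape sketch, not the literal file contents):

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
/* page_table_shareable(), vma_shareable(),
 * huge_pmd_share(), huge_pmd_unshare() */
#endif

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
/* huge_pte_alloc(), huge_pte_offset()
 * and the follow_huge_* helpers */
#endif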