path: root/arch/arm64/kvm
author    Linus Torvalds <torvalds@linux-foundation.org>  2016-05-16 20:17:24 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-05-16 20:17:24 -0400
commit    be092017b6ffbd013f481f915632db6aa9fc3ca3 (patch)
tree      56f37b2b232ef41c0202c4f57d8e83e93d9168f4 /arch/arm64/kvm
parent    fb6363e9f4eeb37323feb8253b93854195942b8b (diff)
parent    e6d9a52543338603e25e71e0e4942f05dae0dd8a (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:

 - virt_to_page/page_address optimisations

 - support for NUMA systems described using device-tree

 - support for hibernate/suspend-to-disk

 - proper support for maxcpus= command line parameter

 - detection and graceful handling of AArch64-only CPUs

 - miscellaneous cleanups and non-critical fixes

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (92 commits)
  arm64: do not enforce strict 16 byte alignment to stack pointer
  arm64: kernel: Fix incorrect brk randomization
  arm64: cpuinfo: Missing NULL terminator in compat_hwcap_str
  arm64: secondary_start_kernel: Remove unnecessary barrier
  arm64: Ensure pmd_present() returns false after pmd_mknotpresent()
  arm64: Replace hard-coded values in the pmd/pud_bad() macros
  arm64: Implement pmdp_set_access_flags() for hardware AF/DBM
  arm64: Fix typo in the pmdp_huge_get_and_clear() definition
  arm64: mm: remove unnecessary EXPORT_SYMBOL_GPL
  arm64: always use STRICT_MM_TYPECHECKS
  arm64: kvm: Fix kvm teardown for systems using the extended idmap
  arm64: kaslr: increase randomization granularity
  arm64: kconfig: drop CONFIG_RTC_LIB dependency
  arm64: make ARCH_SUPPORTS_DEBUG_PAGEALLOC depend on !HIBERNATION
  arm64: hibernate: Refuse to hibernate if the boot cpu is offline
  arm64: kernel: Add support for hibernate/suspend-to-disk
  PM / Hibernate: Call flush_icache_range() on pages restored in-place
  arm64: Add new asm macro copy_page
  arm64: Promote KERNEL_START/KERNEL_END definitions to a header file
  arm64: kernel: Include _AC definition in page.h
  ...
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/handle_exit.c    7
-rw-r--r--  arch/arm64/kvm/hyp-init.S      48
-rw-r--r--  arch/arm64/kvm/hyp.S           11
-rw-r--r--  arch/arm64/kvm/hyp/entry.S     19
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S 10
-rw-r--r--  arch/arm64/kvm/reset.c         30
6 files changed, 114 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index eba89e42f0ed..3246c4aba5b1 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -186,6 +186,13 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		exit_handler = kvm_get_exit_handler(vcpu);
 
 		return exit_handler(vcpu, run);
+	case ARM_EXCEPTION_HYP_GONE:
+		/*
+		 * EL2 has been reset to the hyp-stub. This happens when a guest
+		 * is pre-empted by kvm_reboot()'s shutdown call.
+		 */
+		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+		return 0;
 	default:
 		kvm_pr_unimpl("Unsupported exception type: %d",
 			      exception_index);
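
Note: the new ARM_EXCEPTION_HYP_GONE case surfaces to userspace as a
KVM_EXIT_FAIL_ENTRY exit from KVM_RUN. As a rough illustration (not part of
this patch; the vcpu_fd and the mmap()ed kvm_run structure are assumed to be
set up elsewhere, and run_vcpu_once is a hypothetical helper), a VMM's run
loop would observe it like this:

	#include <linux/kvm.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	/* Hypothetical helper: run one vcpu iteration and classify the exit. */
	static int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
	{
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;

		switch (run->exit_reason) {
		case KVM_EXIT_FAIL_ENTRY:
			/* e.g. EL2 was torn down by a reboot while we ran */
			fprintf(stderr, "vcpu entry failed\n");
			return -1;
		default:
			return 0;
		}
	}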
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 7d8747c6427c..a873a6d8be90 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -21,6 +21,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
 #include <asm/pgtable-hwdef.h>
+#include <asm/sysreg.h>
 
 	.text
 	.pushsection	.hyp.idmap.text, "ax"
@@ -103,8 +104,8 @@ __do_hyp_init:
 	dsb	sy
 
 	mrs	x4, sctlr_el2
-	and	x4, x4, #SCTLR_EL2_EE	// preserve endianness of EL2
-	ldr	x5, =SCTLR_EL2_FLAGS
+	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
+	ldr	x5, =SCTLR_ELx_FLAGS
 	orr	x4, x4, x5
 	msr	sctlr_el2, x4
 	isb
@@ -138,6 +139,49 @@ merged:
 	eret
 ENDPROC(__kvm_hyp_init)
 
+	/*
+	 * Reset kvm back to the hyp stub. This is the trampoline dance in
+	 * reverse. If kvm used an extended idmap, __extended_idmap_trampoline
+	 * calls this code directly in the idmap. In this case switching to the
+	 * boot tables is a no-op.
+	 *
+	 * x0: HYP boot pgd
+	 * x1: HYP phys_idmap_start
+	 */
+ENTRY(__kvm_hyp_reset)
+	/* We're in trampoline code in VA, switch back to boot page tables */
+	msr	ttbr0_el2, x0
+	isb
+
+	/* Ensure the PA branch doesn't find a stale TLB entry or stale code */
+	ic	iallu
+	tlbi	alle2
+	dsb	sy
+	isb
+
+	/* Branch into PA space */
+	adr	x0, 1f
+	bfi	x1, x0, #0, #PAGE_SHIFT
+	br	x1
+
+	/* We're now in idmap, disable MMU */
+1:	mrs	x0, sctlr_el2
+	ldr	x1, =SCTLR_ELx_FLAGS
+	bic	x0, x0, x1		// Clear SCTLR_ELx.M and other flags
+	msr	sctlr_el2, x0
+	isb
+
+	/* Invalidate the old TLBs */
+	tlbi	alle2
+	dsb	sy
+
+	/* Install stub vectors */
+	adr_l	x0, __hyp_stub_vectors
+	msr	vbar_el2, x0
+
+	eret
+ENDPROC(__kvm_hyp_reset)
+
 	.ltorg
 
 	.popsection
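
Note: the `bfi x1, x0, #0, #PAGE_SHIFT` above builds the physical branch
target by keeping the page-aligned bits of phys_idmap_start and splicing in
the label's offset within its page. A minimal C sketch of that arithmetic
(the standalone function and its name are illustrative, not kernel code):

	/* What "bfi x1, x0, #0, #PAGE_SHIFT" computes, written out in C. */
	static unsigned long splice_page_offset(unsigned long phys_idmap_start,
						unsigned long label_va,
						unsigned int page_shift)
	{
		unsigned long in_page_mask = (1UL << page_shift) - 1;

		/* page-aligned physical base | in-page offset of the label */
		return (phys_idmap_start & ~in_page_mask) |
		       (label_va & in_page_mask);
	}

This works because the trampoline copy and the idmap text share the same
offset within their respective pages, so only the low PAGE_SHIFT bits of the
virtual label are needed.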
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 48f19a37b3df..7ce931565151 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -35,16 +35,21 @@
  * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
  * passed in x0.
  *
- * A function pointer with a value of 0 has a special meaning, and is
- * used to implement __hyp_get_vectors in the same way as in
+ * A function pointer with a value less than 0xfff has a special meaning,
+ * and is used to implement __hyp_get_vectors in the same way as in
  * arch/arm64/kernel/hyp_stub.S.
+ * HVC behaves as a 'bl' call and will clobber lr.
  */
 ENTRY(__kvm_call_hyp)
 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+	str	lr, [sp, #-16]!
 	hvc	#0
+	ldr	lr, [sp], #16
 	ret
 alternative_else
 	b	__vhe_hyp_call
 	nop
+	nop
+	nop
 alternative_endif
 ENDPROC(__kvm_call_hyp)
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index ce9e5e5f28cf..70254a65bd5b 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -164,3 +164,22 @@ alternative_endif
 
 	eret
 ENDPROC(__fpsimd_guest_restore)
+
+/*
+ * When using the extended idmap, we don't have a trampoline page we can use
+ * while we switch page tables during __kvm_hyp_reset. Accessing the idmap
+ * directly would be ideal, but if we're using the extended idmap then the
+ * idmap is located above HYP_PAGE_OFFSET, and the address will be masked by
+ * kvm_call_hyp using kern_hyp_va.
+ *
+ * x0: HYP boot pgd
+ * x1: HYP phys_idmap_start
+ */
+ENTRY(__extended_idmap_trampoline)
+	mov	x4, x1
+	adr_l	x3, __kvm_hyp_reset
+
+	/* insert __kvm_hyp_reset()'s offset into phys_idmap_start */
+	bfi	x4, x3, #0, #PAGE_SHIFT
+	br	x4
+ENDPROC(__extended_idmap_trampoline)
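
Note: the masking the comment refers to comes from kern_hyp_va, which at
this point in the code's history folded a kernel VA into the HYP VA range
with a simple AND; an address above HYP_PAGE_OFFSET loses its high bits in
the process, which is why the idmap can't be branched to directly. A toy
model of the idea (the mask value and names are invented for the example):

	/* Toy model of kern_hyp_va-style masking; mask value is invented. */
	#define TOY_HYP_PAGE_OFFSET_MASK	0x0000ffffffffffffUL

	static unsigned long toy_kern_hyp_va(unsigned long kern_va)
	{
		/*
		 * A VA above the HYP range loses its high bits here, so a
		 * branch target computed this way would point elsewhere.
		 */
		return kern_va & TOY_HYP_PAGE_OFFSET_MASK;
	}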
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 3488894397ff..2d87f36d5cb4 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -42,19 +42,17 @@
 	 * Shuffle the parameters before calling the function
 	 * pointed to in x0. Assumes parameters in x[1,2,3].
 	 */
-	sub	sp, sp, #16
-	str	lr, [sp]
 	mov	lr, x0
 	mov	x0, x1
 	mov	x1, x2
 	mov	x2, x3
 	blr	lr
-	ldr	lr, [sp]
-	add	sp, sp, #16
 .endm
 
 ENTRY(__vhe_hyp_call)
+	str	lr, [sp, #-16]!
 	do_el2_call
+	ldr	lr, [sp], #16
 	/*
 	 * We used to rely on having an exception return to get
 	 * an implicit isb. In the E2H case, we don't have it anymore.
@@ -84,8 +82,8 @@ alternative_endif
 	/* Here, we're pretty sure the host called HVC. */
 	restore_x0_to_x3
 
-	/* Check for __hyp_get_vectors */
-	cbnz	x0, 1f
+	cmp	x0, #HVC_GET_VECTORS
+	b.ne	1f
 	mrs	x0, vbar_el2
 	b	2f
 
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 9677bf069bcc..b1ad730e1567 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -29,7 +29,9 @@
 #include <asm/cputype.h>
 #include <asm/ptrace.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
 
 /*
  * ARMv8 Reset Values
@@ -130,3 +132,31 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	/* Reset timer */
 	return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
 }
+
+extern char __hyp_idmap_text_start[];
+
+unsigned long kvm_hyp_reset_entry(void)
+{
+	if (!__kvm_cpu_uses_extended_idmap()) {
+		unsigned long offset;
+
+		/*
+		 * Find the address of __kvm_hyp_reset() in the trampoline page.
+		 * This is present in the running page tables, and the boot page
+		 * tables, so we call the code here to start the trampoline
+		 * dance in reverse.
+		 */
+		offset = (unsigned long)__kvm_hyp_reset
+			 - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK);
+
+		return TRAMPOLINE_VA + offset;
+	} else {
+		/*
+		 * KVM is running with merged page tables, which don't have the
+		 * trampoline page mapped. We know the idmap is still mapped,
+		 * but can't be called into directly. Use
+		 * __extended_idmap_trampoline to do the call.
+		 */
+		return (unsigned long)kvm_ksym_ref(__extended_idmap_trampoline);
+	}
+}
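
Note: in the non-extended-idmap branch, the returned entry point is just
TRAMPOLINE_VA plus __kvm_hyp_reset's offset into the page-aligned idmap
text. A self-contained sketch of that arithmetic with made-up addresses
(4K pages assumed; none of these values come from a real build):

	#include <stdio.h>

	#define PAGE_MASK	(~0xfffUL)		/* 4K pages assumed */
	#define TRAMPOLINE_VA	0xffffffbffe400000UL	/* made-up value */

	int main(void)
	{
		/* made-up link addresses for the idmap text and the symbol */
		unsigned long hyp_idmap_text_start = 0xffff000008a81000UL;
		unsigned long kvm_hyp_reset        = 0xffff000008a81234UL;

		unsigned long offset = kvm_hyp_reset -
				       (hyp_idmap_text_start & PAGE_MASK);

		/* prints TRAMPOLINE_VA plus the 0x234 in-page offset */
		printf("reset entry: %#lx\n", TRAMPOLINE_VA + offset);
		return 0;
	}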