author		AKASHI Takahiro <takahiro.akashi@linaro.org>	2016-04-27 12:47:05 -0400
committer	Will Deacon <will.deacon@arm.com>	2016-04-28 07:05:46 -0400
commit		67f6919766620e7ea7aab11a6a3470dc7b451359 (patch)
tree		603439d01bc9ecb8e8d7f0abda6b5a1bed2394e4 /arch/arm64/kvm
parent		c94b0cf28281d483c8b43b4874fcb7ab14ade1b1 (diff)
arm64: kvm: allows kvm cpu hotplug
The current KVM implementation on arm64 does cpu-specific initialization at system boot, and has no way to gracefully shut down a core in terms of KVM. This prevents kexec from rebooting the system at EL2.

This patch adds a cpu tear-down function and also moves the existing cpu-init code into a separate function, kvm_arch_hardware_disable() and kvm_arch_hardware_enable() respectively. We don't need the arm64-specific cpu hotplug hook any more.

Since this patch modifies common code between arm and arm64, one stub definition, __cpu_reset_hyp_mode(), is added on the arm side to avoid compilation errors.

Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
[Rebase, added separate VHE init/exit path, changed reset's use of kvm_call_hyp() to the __ version, en/disabled hardware in init_subsystems(), added icache maintenance to __kvm_hyp_reset() and removed lr restore, removed guest-enter after teardown handling]
Signed-off-by: James Morse <james.morse@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
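For orientation, here is a minimal sketch of how the two hooks named above are expected to wrap the init and tear-down paths in the common arm/arm64 code. Only kvm_arch_hardware_enable(), kvm_arch_hardware_disable() and __cpu_reset_hyp_mode() are named by the commit message; the helper names and bodies below are illustrative assumptions, not the applied diff.

/* Sketch only: helper names cpu_hyp_reinit()/cpu_hyp_reset() are assumed. */
int kvm_arch_hardware_enable(void)
{
	/* The per-cpu EL2 init that used to run once at boot now runs on
	 * every hardware-enable (cpu online) event. */
	cpu_hyp_reinit();
	return 0;
}

void kvm_arch_hardware_disable(void)
{
	/* New tear-down path: return this cpu to the hyp-stub so a later
	 * kexec can re-enter EL2 cleanly. On arm64 this ends up in
	 * __cpu_reset_hyp_mode()/__kvm_hyp_reset(); on arm the stub is a no-op. */
	cpu_hyp_reset();
}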
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--	arch/arm64/kvm/hyp-init.S	38
-rw-r--r--	arch/arm64/kvm/reset.c	14
2 files changed, 52 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 5ce1b47ef770..44ec4cb23ae7 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -139,6 +139,44 @@ merged:
 	eret
 ENDPROC(__kvm_hyp_init)
 
+	/*
+	 * x0: HYP boot pgd
+	 * x1: HYP phys_idmap_start
+	 */
+ENTRY(__kvm_hyp_reset)
+	/* We're in trampoline code in VA, switch back to boot page tables */
+	msr	ttbr0_el2, x0
+	isb
+
+	/* Ensure the PA branch doesn't find a stale tlb entry or stale code. */
+	ic	iallu
+	tlbi	alle2
+	dsb	sy
+	isb
+
+	/* Branch into PA space */
+	adr	x0, 1f
+	bfi	x1, x0, #0, #PAGE_SHIFT
+	br	x1
+
+	/* We're now in idmap, disable MMU */
+1:	mrs	x0, sctlr_el2
+	ldr	x1, =SCTLR_ELx_FLAGS
+	bic	x0, x0, x1		// Clear SCTL_M and etc
+	msr	sctlr_el2, x0
+	isb
+
+	/* Invalidate the old TLBs */
+	tlbi	alle2
+	dsb	sy
+
+	/* Install stub vectors */
+	adr_l	x0, __hyp_stub_vectors
+	msr	vbar_el2, x0
+
+	eret
+ENDPROC(__kvm_hyp_reset)
+
 	.ltorg
 
 	.popsection
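The "Branch into PA space" step above relies on __kvm_hyp_reset living in the page-aligned trampoline/idmap page: bfi x1, x0, #0, #PAGE_SHIFT keeps the physical page base passed in x1 and replaces its low PAGE_SHIFT bits with the in-page offset of label 1f taken from x0, producing the physical address of that label. A small hedged C rendering of the same address arithmetic follows (function name and the 4K PAGE_SHIFT value are illustrative):

/* Illustrative only: the computation performed by bfi x1, x0, #0, #PAGE_SHIFT. */
#define PAGE_SHIFT	12	/* assumption: 4K pages */

static unsigned long pa_branch_target(unsigned long phys_idmap_start,	/* x1 */
				      unsigned long label_va)		/* x0, VA of 1f */
{
	unsigned long page_mask = (1UL << PAGE_SHIFT) - 1;

	/* keep the page-granular physical base, take the offset within the
	 * page from the virtual address of the label */
	return (phys_idmap_start & ~page_mask) | (label_va & page_mask);
}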
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 9677bf069bcc..4062e6dd4cc1 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -29,7 +29,9 @@
 #include <asm/cputype.h>
 #include <asm/ptrace.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
 
 /*
  * ARMv8 Reset Values
@@ -130,3 +132,15 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	/* Reset timer */
 	return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
 }
+
+extern char __hyp_idmap_text_start[];
+
+phys_addr_t kvm_hyp_reset_entry(void)
+{
+	unsigned long offset;
+
+	offset = (unsigned long)__kvm_hyp_reset
+		 - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK);
+
+	return TRAMPOLINE_VA + offset;
+}
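For context, a hedged sketch of how this trampoline address is expected to be reached from the tear-down path on arm64. The commit message only says that the reset path now uses the __ variant of kvm_call_hyp() and that __cpu_reset_hyp_mode() exists; the wrapper body and argument names below are illustrative assumptions, not the verbatim patch.

/* Sketch only: paraphrased caller of kvm_hyp_reset_entry(). */
static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
					phys_addr_t phys_idmap_start)
{
	/*
	 * Jump to __kvm_hyp_reset through its trampoline VA. That routine
	 * switches back to the boot page tables, branches into the idmap,
	 * disables the EL2 MMU and installs __hyp_stub_vectors, as shown
	 * in the hyp-init.S hunk above.
	 */
	__kvm_call_hyp((void *)kvm_hyp_reset_entry(),
		       boot_pgd_ptr, phys_idmap_start);
}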