about summary refs log tree commit diff stats
path: root/arch/arm/include/asm
diff options
context:
space:
mode:
authorMarc Zyngier <marc.zyngier@arm.com>2013-04-12 14:12:06 -0400
committerChristoffer Dall <cdall@cs.columbia.edu>2013-04-29 01:23:10 -0400
commit5a677ce044f18a341ab942e23516e52ad89f7687 (patch)
tree3db77d34c381b19b0a73afbe95a13414a5daa8f6 /arch/arm/include/asm
parent4f728276fbf1e043010485d7e9275082a1c3d650 (diff)
ARM: KVM: switch to a dual-step HYP init code
Our HYP init code suffers from two major design issues:
- it cannot support CPU hotplug, as we tear down the idmap very early
- it cannot perform a TLB invalidation when switching from init to
  runtime mappings, as pages are manipulated from PL1 exclusively

The hotplug problem mandates that we keep two sets of page tables
(boot and runtime). The TLB problem mandates that we're able to
transition from one PGD to another while in HYP, invalidating the TLBs
in the process.

To be able to do this, we need to share a page between the two page
tables. A page that will have the same VA in both configurations. All we
need is a VA that has the following properties:
- This VA can't be used to represent a kernel mapping.
- This VA will not conflict with the physical address of the kernel text

The vectors page seems to satisfy this requirement:
- The kernel never maps anything else there
- The kernel text being copied at the beginning of the physical memory,
  it is unlikely to use the last 64kB (I doubt we'll ever support KVM
  on a system with something like 4MB of RAM, but patches are very
  welcome).

Let's call this VA the trampoline VA.

Now, we map our init page at 3 locations:
- idmap in the boot pgd
- trampoline VA in the boot pgd
- trampoline VA in the runtime pgd

The init scenario is now the following:
- We jump in HYP with four parameters: boot HYP pgd, runtime HYP pgd,
  runtime stack, runtime vectors
- Enable the MMU with the boot pgd
- Jump to a target into the trampoline page (remember, this is the same
  physical page!)
- Now switch to the runtime pgd (same VA, and still the same physical
  page!)
- Invalidate TLBs
- Set stack and vectors
- Profit! (or eret, if you only care about the code).

Note that we keep the boot mapping permanently (it is not strictly an
idmap anymore) to allow for CPU hotplug in later patches.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <cdall@cs.columbia.edu>
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--arch/arm/include/asm/kvm_host.h31
-rw-r--r--arch/arm/include/asm/kvm_mmu.h24
2 files changed, 40 insertions, 15 deletions
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 78813b8fad32..6c2a35da867e 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -188,23 +188,30 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
188int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, 188int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
189 int exception_index); 189 int exception_index);
190 190
191static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr, 191static inline void __cpu_init_hyp_mode(unsigned long long boot_pgd_ptr,
192 unsigned long long pgd_ptr,
192 unsigned long hyp_stack_ptr, 193 unsigned long hyp_stack_ptr,
193 unsigned long vector_ptr) 194 unsigned long vector_ptr)
194{ 195{
195 unsigned long pgd_low, pgd_high;
196
197 pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
198 pgd_high = (pgd_ptr >> 32ULL);
199
200 /* 196 /*
201 * Call initialization code, and switch to the full blown 197 * Call initialization code, and switch to the full blown HYP
202 * HYP code. The init code doesn't need to preserve these registers as 198 * code. The init code doesn't need to preserve these
203 * r1-r3 and r12 are already callee save according to the AAPCS. 199 * registers as r0-r3 are already callee saved according to
204 * Note that we slightly misuse the prototype by casing the pgd_low to 200 * the AAPCS.
205 * a void *. 201 * Note that we slightly misuse the prototype by casing the
202 * stack pointer to a void *.
203 *
204 * We don't have enough registers to perform the full init in
205 * one go. Install the boot PGD first, and then install the
206 * runtime PGD, stack pointer and vectors. The PGDs are always
207 * passed as the third argument, in order to be passed into
208 * r2-r3 to the init code (yes, this is compliant with the
209 * PCS!).
206 */ 210 */
207 kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr); 211
212 kvm_call_hyp(NULL, 0, boot_pgd_ptr);
213
214 kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
208} 215}
209 216
210int kvm_perf_init(void); 217int kvm_perf_init(void);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 92eb20d57942..24b767a8cdb9 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -19,17 +19,29 @@
19#ifndef __ARM_KVM_MMU_H__ 19#ifndef __ARM_KVM_MMU_H__
20#define __ARM_KVM_MMU_H__ 20#define __ARM_KVM_MMU_H__
21 21
22#include <asm/cacheflush.h> 22#include <asm/memory.h>
23#include <asm/pgalloc.h> 23#include <asm/page.h>
24 24
25/* 25/*
26 * We directly use the kernel VA for the HYP, as we can directly share 26 * We directly use the kernel VA for the HYP, as we can directly share
27 * the mapping (HTTBR "covers" TTBR1). 27 * the mapping (HTTBR "covers" TTBR1).
28 */ 28 */
29#define HYP_PAGE_OFFSET_MASK (~0UL) 29#define HYP_PAGE_OFFSET_MASK UL(~0)
30#define HYP_PAGE_OFFSET PAGE_OFFSET 30#define HYP_PAGE_OFFSET PAGE_OFFSET
31#define KERN_TO_HYP(kva) (kva) 31#define KERN_TO_HYP(kva) (kva)
32 32
33/*
34 * Our virtual mapping for the boot-time MMU-enable code. Must be
35 * shared across all the page-tables. Conveniently, we use the vectors
36 * page, where no kernel data will ever be shared with HYP.
37 */
38#define TRAMPOLINE_VA UL(CONFIG_VECTORS_BASE)
39
40#ifndef __ASSEMBLY__
41
42#include <asm/cacheflush.h>
43#include <asm/pgalloc.h>
44
33int create_hyp_mappings(void *from, void *to); 45int create_hyp_mappings(void *from, void *to);
34int create_hyp_io_mappings(void *from, void *to, phys_addr_t); 46int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
35void free_hyp_pgds(void); 47void free_hyp_pgds(void);
@@ -44,6 +56,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
44void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); 56void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
45 57
46phys_addr_t kvm_mmu_get_httbr(void); 58phys_addr_t kvm_mmu_get_httbr(void);
59phys_addr_t kvm_mmu_get_boot_httbr(void);
60phys_addr_t kvm_get_idmap_vector(void);
47int kvm_mmu_init(void); 61int kvm_mmu_init(void);
48void kvm_clear_hyp_idmap(void); 62void kvm_clear_hyp_idmap(void);
49 63
@@ -113,4 +127,8 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
113 } 127 }
114} 128}
115 129
130#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
131
132#endif /* !__ASSEMBLY__ */
133
116#endif /* __ARM_KVM_MMU_H__ */ 134#endif /* __ARM_KVM_MMU_H__ */