about summary refs log tree commit diff stats
path: root/arch/arm64/include
diff options
context:
space:
mode:
authorArd Biesheuvel <ard.biesheuvel@linaro.org>2015-03-19 12:42:28 -0400
committerWill Deacon <will.deacon@arm.com>2015-03-23 07:35:29 -0400
commite4c5a6851058386c9e109ad529717a23173918bc (patch)
treebc04e772bb53b5166ffd5bfce36c99ca32a4b0e5 /arch/arm64/include
parentdd006da21646f1c86f0242eb8f527d093303127a (diff)
arm64: KVM: use ID map with increased VA range if required
This patch modifies the HYP init code so it can deal with system RAM residing at an offset which exceeds the reach of VA_BITS. Like for EL1, this involves configuring an additional level of translation for the ID map. However, in case of EL2, this implies that all translations use the extra level, as we cannot seamlessly switch between translation tables with different numbers of translation levels. So add an extra translation table at the root level. Since the ID map and the runtime HYP map are guaranteed not to overlap, they can share this root level, and we can essentially merge these two tables into one. Tested-by: Marc Zyngier <marc.zyngier@arm.com> Reviewed-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h33
1 file changed, 33 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6458b5373142..edfe6864bc28 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -68,6 +68,8 @@
68#include <asm/pgalloc.h> 68#include <asm/pgalloc.h>
69#include <asm/cachetype.h> 69#include <asm/cachetype.h>
70#include <asm/cacheflush.h> 70#include <asm/cacheflush.h>
71#include <asm/mmu_context.h>
72#include <asm/pgtable.h>
71 73
72#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) 74#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
73 75
@@ -305,5 +307,36 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
305void kvm_set_way_flush(struct kvm_vcpu *vcpu); 307void kvm_set_way_flush(struct kvm_vcpu *vcpu);
306void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); 308void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
307 309
310static inline bool __kvm_cpu_uses_extended_idmap(void)
311{
312 return __cpu_uses_extended_idmap();
313}
314
315static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
316 pgd_t *hyp_pgd,
317 pgd_t *merged_hyp_pgd,
318 unsigned long hyp_idmap_start)
319{
320 int idmap_idx;
321
322 /*
323 * Use the first entry to access the HYP mappings. It is
324 * guaranteed to be free, otherwise we wouldn't use an
325 * extended idmap.
326 */
327 VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
328 merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);
329
330 /*
331 * Create another extended level entry that points to the boot HYP map,
332 * which contains an ID mapping of the HYP init code. We essentially
333 * merge the boot and runtime HYP maps by doing so, but they don't
334 * overlap anyway, so this is fine.
335 */
336 idmap_idx = hyp_idmap_start >> VA_BITS;
337 VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
338 merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
339}
340
308#endif /* __ASSEMBLY__ */ 341#endif /* __ASSEMBLY__ */
309#endif /* __ARM64_KVM_MMU_H__ */ 342#endif /* __ARM64_KVM_MMU_H__ */