Diffstat (limited to 'arch/arm/include/asm/kvm_mmu.h')
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 67
1 file changed, 67 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 421a20b34874..970f3b5fa109 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -19,6 +19,18 @@
 #ifndef __ARM_KVM_MMU_H__
 #define __ARM_KVM_MMU_H__
 
+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+#include <asm/idmap.h>
+
+/*
+ * We directly use the kernel VA for the HYP, as we can directly share
+ * the mapping (HTTBR "covers" TTBR1).
+ */
+#define HYP_PAGE_OFFSET_MASK	(~0UL)
+#define HYP_PAGE_OFFSET		PAGE_OFFSET
+#define KERN_TO_HYP(kva)	(kva)
+
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_hyp_pmds(void);
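
For illustration (not part of this patch): because KERN_TO_HYP() is the identity mapping here, a kernel object only needs to be mapped with create_hyp_mappings() and HYP code can then dereference the very same pointer the kernel uses. A minimal sketch of a caller, with struct my_hyp_state, hyp_data and map_hyp_data() invented for the example:

	struct my_hyp_state {		/* hypothetical object read by HYP code */
		unsigned long flags;
	};

	static struct my_hyp_state hyp_data;

	static int __init map_hyp_data(void)
	{
		/*
		 * Map [&hyp_data, &hyp_data + 1) into the HYP page tables.
		 * KERN_TO_HYP(&hyp_data) == &hyp_data, so no address
		 * translation is needed on the HYP side.
		 */
		return create_hyp_mappings(&hyp_data, &hyp_data + 1);
	}
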
@@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
+static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+	pte_val(*pte) = new_pte;
+	/*
+	 * flush_pmd_entry just takes a void pointer and cleans the necessary
+	 * cache entries, so we can reuse the function for ptes.
+	 */
+	flush_pmd_entry(pte);
+}
+
 static inline bool kvm_is_write_fault(unsigned long hsr)
 {
 	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
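
For illustration (not part of this patch): kvm_set_pte() pairs the PTE write with a D-cache clean (flush_pmd_entry() cleans by address, so it works for any page-table entry), ensuring a table walker that does not snoop the cache sees the update. A minimal sketch of a caller, with stage2_install_pte() and its prot parameter invented for the example (kvm_set_s2pte_writable() is added by the hunk below):

	static void stage2_install_pte(pte_t *ptep, unsigned long pfn,
				       pgprot_t prot, bool writable)
	{
		pte_t new_pte = pfn_pte(pfn, prot);	/* build the new entry */

		if (writable)
			kvm_set_s2pte_writable(&new_pte);	/* OR in L_PTE_S2_RDWR */

		/* Write the entry, then clean the D-cache for it. */
		kvm_set_pte(ptep, new_pte);
	}
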
@@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr)
 	return true;
 }
 
+static inline void kvm_clean_pgd(pgd_t *pgd)
+{
+	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+}
+
+static inline void kvm_clean_pmd_entry(pmd_t *pmd)
+{
+	clean_pmd_entry(pmd);
+}
+
+static inline void kvm_clean_pte(pte_t *pte)
+{
+	clean_pte_table(pte);
+}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+	pte_val(*pte) |= L_PTE_S2_RDWR;
+}
+
+struct kvm;
+
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+{
+	/*
+	 * If we are going to insert an instruction page and the icache is
+	 * either VIPT or PIPT, there is a potential problem where the host
+	 * (or another VM) may have used the same page as this guest, and we
+	 * read incorrect data from the icache. If we're using a PIPT cache,
+	 * we can invalidate just that page, but if we are using a VIPT cache
+	 * we need to invalidate the entire icache - damn shame - as written
+	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+	 *
+	 * VIVT caches are tagged using both the ASID and the VMID and don't
+	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 */
+	if (icache_is_pipt()) {
+		unsigned long hva = gfn_to_hva(kvm, gfn);
+		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+	} else if (!icache_is_vivt_asid_tagged()) {
+		/* any kind of VIPT cache */
+		__flush_icache_all();
+	}
+}
+
 #endif /* __ARM_KVM_MMU_H__ */
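
For illustration (not part of this patch): coherent_icache_guest_page() is meant to be called before a page the guest may execute from becomes visible, so the guest cannot hit stale icache lines left behind by a previous user of the physical page. A minimal sketch of such a call site, with stage2_map_exec_page() invented for the example:

	static void stage2_map_exec_page(struct kvm *kvm, gfn_t gfn,
					 pte_t *ptep, pte_t new_pte)
	{
		/*
		 * PIPT icache: invalidate only this page's lines.
		 * VIPT icache: the whole icache is invalidated.
		 * ASID+VMID tagged VIVT icache: nothing to do.
		 */
		coherent_icache_guest_page(kvm, gfn);

		kvm_set_pte(ptep, new_pte);	/* publish the mapping */
	}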