Diffstat (limited to 'arch/arm/kvm/mmu.c')
-rw-r--r--  arch/arm/kvm/mmu.c | 41
1 files changed, 26 insertions, 15 deletions
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 965706578f13..84ba67b982c0 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -43,7 +43,14 @@ static phys_addr_t hyp_idmap_vector;
 
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
-	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
+	/*
+	 * This function also gets called when dealing with HYP page
+	 * tables. As HYP doesn't have an associated struct kvm (and
+	 * the HYP page tables are fairly static), we don't do
+	 * anything there.
+	 */
+	if (kvm)
+		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -78,18 +85,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 	return p;
 }
 
-static void clear_pud_entry(pud_t *pud)
+static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
 	pmd_t *pmd_table = pmd_offset(pud, 0);
 	pud_clear(pud);
+	kvm_tlb_flush_vmid_ipa(kvm, addr);
 	pmd_free(NULL, pmd_table);
 	put_page(virt_to_page(pud));
 }
 
-static void clear_pmd_entry(pmd_t *pmd)
+static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 {
 	pte_t *pte_table = pte_offset_kernel(pmd, 0);
 	pmd_clear(pmd);
+	kvm_tlb_flush_vmid_ipa(kvm, addr);
 	pte_free_kernel(NULL, pte_table);
 	put_page(virt_to_page(pmd));
 }
@@ -100,11 +109,12 @@ static bool pmd_empty(pmd_t *pmd)
 	return page_count(pmd_page) == 1;
 }
 
-static void clear_pte_entry(pte_t *pte)
+static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
 {
 	if (pte_present(*pte)) {
 		kvm_set_pte(pte, __pte(0));
 		put_page(virt_to_page(pte));
+		kvm_tlb_flush_vmid_ipa(kvm, addr);
 	}
 }
 
@@ -114,7 +124,8 @@ static bool pte_empty(pte_t *pte)
 	return page_count(pte_page) == 1;
 }
 
-static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
+static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+			unsigned long long start, u64 size)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -138,15 +149,15 @@ static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
 		}
 
 		pte = pte_offset_kernel(pmd, addr);
-		clear_pte_entry(pte);
+		clear_pte_entry(kvm, pte, addr);
 		range = PAGE_SIZE;
 
 		/* If we emptied the pte, walk back up the ladder */
 		if (pte_empty(pte)) {
-			clear_pmd_entry(pmd);
+			clear_pmd_entry(kvm, pmd, addr);
 			range = PMD_SIZE;
 			if (pmd_empty(pmd)) {
-				clear_pud_entry(pud);
+				clear_pud_entry(kvm, pud, addr);
 				range = PUD_SIZE;
 			}
 		}
@@ -165,14 +176,14 @@ void free_boot_hyp_pgd(void)
 	mutex_lock(&kvm_hyp_pgd_mutex);
 
 	if (boot_hyp_pgd) {
-		unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-		unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
+		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 		kfree(boot_hyp_pgd);
 		boot_hyp_pgd = NULL;
 	}
 
 	if (hyp_pgd)
-		unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 
 	kfree(init_bounce_page);
 	init_bounce_page = NULL;
@@ -200,9 +211,10 @@ void free_hyp_pgds(void)
 
 	if (hyp_pgd) {
 		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
 		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+
 		kfree(hyp_pgd);
 		hyp_pgd = NULL;
 	}
@@ -393,7 +405,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
  */
 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 {
-	unmap_range(kvm->arch.pgd, start, size);
+	unmap_range(kvm, kvm->arch.pgd, start, size);
 }
 
 /**
@@ -675,7 +687,6 @@ static void handle_hva_to_gpa(struct kvm *kvm,
 static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
 	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
-	kvm_tlb_flush_vmid_ipa(kvm, gpa);
 }
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
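For context, a minimal sketch of the ordering invariant this patch enforces at each level of the page table walk. This is illustrative only and not part of the commit; the helper name is hypothetical and mirrors clear_pmd_entry() above.

/*
 * Illustrative sketch (hypothetical helper, not in the commit): when a
 * table is unlinked, the descriptor is cleared and the TLB entry for
 * that IPA is invalidated *before* the now-orphaned pte table page is
 * freed, so a concurrent walker can never pick up a stale translation
 * pointing into a recycled page.
 */
static void clear_entry_sketch(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);

	pmd_clear(pmd);				/* 1: unlink the pte table */
	kvm_tlb_flush_vmid_ipa(kvm, addr);	/* 2: invalidate cached walks;
						 *    a no-op when kvm == NULL
						 *    (HYP page tables) */
	pte_free_kernel(NULL, pte_table);	/* 3: only now recycle the page */
	put_page(virt_to_page(pmd));
}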