 arch/arm/include/asm/kvm_mmu.h   | 31
 arch/arm/kvm/mmu.c               | 82
 arch/arm64/include/asm/kvm_mmu.h | 18
 3 files changed, 116 insertions(+), 15 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 286644c729ba..552c31f5a3f7 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -44,6 +44,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 
@@ -188,6 +189,36 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
 	}
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+	void *va = kmap_atomic(pte_page(pte));
+
+	kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+	kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	unsigned long size = PMD_SIZE;
+	pfn_t pfn = pmd_pfn(pmd);
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		pfn++;
+		size -= PAGE_SIZE;
+
+		kunmap_atomic(va);
+	}
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
 #define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))
 
 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
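
Note on the 32-bit helpers above: with CONFIG_HIGHMEM, a guest page may have no permanent kernel mapping, so the flush routines map each 4 KiB page with kmap_atomic()/kmap_atomic_pfn() before cleaning it to the point of coherency; this is also why <linux/highmem.h> is now included. A minimal sketch of the pattern in isolation (not part of the patch; the helper name is hypothetical):

	#include <linux/highmem.h>
	#include <asm/kvm_mmu.h>

	/* Clean+invalidate one guest page to the PoC.  kmap_atomic()
	 * returns a usable kernel virtual address whether the page
	 * lives in lowmem or highmem, unlike page_address(), which
	 * can return NULL for a highmem page.
	 */
	static void flush_guest_page_to_poc(struct page *page)
	{
		void *va = kmap_atomic(page);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
		kunmap_atomic(va);
	}

The empty __kvm_flush_dcache_pud() body is deliberate: 32-bit ARM stage-2 tables never contain huge PUD mappings, so there is nothing to flush at that level.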
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 106737e309b1..78e68abcb01f 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
+/*
+ * D-Cache management functions. They take the page table entries by
+ * value, as they are flushing the cache using the kernel mapping (or
+ * kmap on 32bit).
+ */
+static void kvm_flush_dcache_pte(pte_t pte)
+{
+	__kvm_flush_dcache_pte(pte);
+}
+
+static void kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	__kvm_flush_dcache_pmd(pmd);
+}
+
+static void kvm_flush_dcache_pud(pud_t pud)
+{
+	__kvm_flush_dcache_pud(pud);
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 				  int min, int max)
 {
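
The by-value signatures are worth spelling out: the callers below clear the entry in the page table before flushing, so the flush must operate on a snapshot of the old entry rather than re-read the now-empty slot. A minimal sketch (hypothetical helper, mirroring the unmap paths later in this patch):

	/* Tear down one huge stage-2 PMD mapping. */
	static void clear_and_flush_pmd(struct kvm *kvm, pmd_t *pmd,
					phys_addr_t ipa)
	{
		pmd_t old_pmd = *pmd;		/* snapshot while still valid */

		pmd_clear(pmd);			/* *pmd now reads as none */
		kvm_tlb_flush_vmid_ipa(kvm, ipa);
		kvm_flush_dcache_pmd(old_pmd);	/* flush via the snapshot */
		put_page(virt_to_page(pmd));
	}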
@@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 	put_page(virt_to_page(pmd));
 }
 
+/*
+ * Unmapping vs dcache management:
+ *
+ * If a guest maps certain memory pages as uncached, all writes will
+ * bypass the data cache and go directly to RAM. However, the CPUs
+ * can still speculate reads (not writes) and fill cache lines with
+ * data.
+ *
+ * Those cache lines will be *clean* cache lines though, so a
+ * clean+invalidate operation is equivalent to an invalidate
+ * operation, because no cache lines are marked dirty.
+ *
+ * Those clean cache lines could be filled prior to an uncached write
+ * by the guest, and the cache coherent IO subsystem would therefore
+ * end up writing old data to disk.
+ *
+ * This is why right after unmapping a page/section and invalidating
+ * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
+ * the IO subsystem will never hit in the cache.
+ */
 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 		       phys_addr_t addr, phys_addr_t end)
 {
@@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 	start_pte = pte = pte_offset_kernel(pmd, addr);
 	do {
 		if (!pte_none(*pte)) {
+			pte_t old_pte = *pte;
+
 			kvm_set_pte(pte, __pte(0));
-			put_page(virt_to_page(pte));
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+			/* No need to invalidate the cache for device mappings */
+			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+				kvm_flush_dcache_pte(old_pte);
+
+			put_page(virt_to_page(pte));
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
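
The PAGE_S2_DEVICE test above is the one shortcut on this path: device mappings are installed with Device memory attributes at stage 2, so their lines are never allocated into the data cache and there is nothing to clean or invalidate (kmap'ing an MMIO pfn just to flush it would be wrong in any case). A sketch of the predicate, with a hypothetical name:

	/* True if the stage-2 PTE maps cacheable memory, i.e. its
	 * memory-attribute bits are not the Device encoding used by
	 * PAGE_S2_DEVICE, so a dcache flush is actually needed.
	 */
	static bool stage2_pte_needs_flush(pte_t pte)
	{
		return (pte_val(pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE;
	}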
@@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
 			if (kvm_pmd_huge(*pmd)) {
+				pmd_t old_pmd = *pmd;
+
 				pmd_clear(pmd);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+				kvm_flush_dcache_pmd(old_pmd);
+
 				put_page(virt_to_page(pmd));
 			} else {
 				unmap_ptes(kvm, pmd, addr, next);
@@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
 		next = kvm_pud_addr_end(addr, end);
 		if (!pud_none(*pud)) {
 			if (pud_huge(*pud)) {
+				pud_t old_pud = *pud;
+
 				pud_clear(pud);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+				kvm_flush_dcache_pud(old_pud);
+
 				put_page(virt_to_page(pud));
 			} else {
 				unmap_pmds(kvm, pud, addr, next);
@@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte)) {
-			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-			kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
-		}
+		if (!pte_none(*pte) &&
+		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
@@ -226,12 +282,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
 	do {
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
-			if (kvm_pmd_huge(*pmd)) {
-				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-				kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
-			} else {
+			if (kvm_pmd_huge(*pmd))
+				kvm_flush_dcache_pmd(*pmd);
+			else
 				stage2_flush_ptes(kvm, pmd, addr, next);
-			}
 		}
 	} while (pmd++, addr = next, addr != end);
 }
@@ -246,12 +300,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
 	do {
 		next = kvm_pud_addr_end(addr, end);
 		if (!pud_none(*pud)) {
-			if (pud_huge(*pud)) {
-				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-				kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
-			} else {
+			if (pud_huge(*pud))
+				kvm_flush_dcache_pud(*pud);
+			else
 				stage2_flush_pmds(kvm, pud, addr, next);
-			}
 		}
 	} while (pud++, addr = next, addr != end);
 }
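
Two things change in the stage2_flush_* walkers above. First, they now flush through the kernel mapping of the page the entry actually points at (via the kvm_flush_dcache_p*() helpers) instead of computing a userspace hva with gfn_to_hva() and flushing that, which depended on the userspace mapping being present and valid. Second, they inherit the device-mapping skip. For reference, this is stage2_flush_ptes() as it reads with the patch applied (reconstructed from the hunk above):

	static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
				      phys_addr_t addr, phys_addr_t end)
	{
		pte_t *pte;

		pte = pte_offset_kernel(pmd, addr);
		do {
			if (!pte_none(*pte) &&
			    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
				kvm_flush_dcache_pte(*pte);
		} while (pte++, addr += PAGE_SIZE, addr != end);
	}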
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 92d22e94a79b..cbdc236d81f8 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -258,6 +258,24 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
 	}
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+	struct page *page = pte_page(pte);
+	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	struct page *page = pmd_page(pmd);
+	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+	struct page *page = pud_page(pud);
+	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+}
+
 #define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))
 
 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
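
The arm64 variants are simpler than their 32-bit counterparts because arm64 has no highmem: all of RAM sits in the linear mapping, so page_address() always returns a valid, virtually contiguous kernel address and a whole huge mapping can be cleaned in one call. A sketch of the common shape (hypothetical helper, assuming the linear map as just described):

	/* Clean+invalidate a naturally aligned huge mapping to the PoC
	 * as one contiguous range; no per-page kmap loop is needed.
	 */
	static inline void flush_linear_mapping_to_poc(struct page *page,
						       unsigned long size)
	{
		kvm_flush_dcache_to_poc(page_address(page), size);
	}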