aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@ozlabs.org>2018-02-23 05:40:49 -0500
committerPaul Mackerras <paulus@ozlabs.org>2018-03-18 19:08:38 -0400
commitc4c8a7643e74ebd7f2cfa80807562f16bb58c1d9 (patch)
tree546604c43e6a6872abd50364ad7cf8a2fcc8447c
parent39c983ea0f96a270d4876c4148e3bb2d9cd3294f (diff)
KVM: PPC: Book3S HV: Radix page fault handler optimizations
This improves the handling of transparent huge pages in the radix hypervisor page fault handler. Previously, if a small page is faulted in to a 2MB region of guest physical space, that means that there is a page table pointer at the PMD level, which could never be replaced by a leaf (2MB) PMD entry. This adds the code to clear the PMD, invalidate the page walk cache and free the page table page in this situation, so that the leaf PMD entry can be created. This also adds code to check whether a PMD or PTE being inserted is the same as is already there (because of a race with another CPU that faulted on the same page) and if so, we don't replace the existing entry, meaning that we don't invalidate the PTE or PMD and do a TLB invalidation. Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c42
1 files changed, 27 insertions, 15 deletions
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 5cb4e4687107..ed62164f8474 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -160,6 +160,17 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
160 asm volatile("ptesync": : :"memory"); 160 asm volatile("ptesync": : :"memory");
161} 161}
162 162
163static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr)
164{
165 unsigned long rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
166
167 asm volatile("ptesync": : :"memory");
168 /* RIC=1 PRS=0 R=1 IS=2 */
169 asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1)
170 : : "r" (rb), "r" (kvm->arch.lpid) : "memory");
171 asm volatile("ptesync": : :"memory");
172}
173
163unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, 174unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
164 unsigned long clr, unsigned long set, 175 unsigned long clr, unsigned long set,
165 unsigned long addr, unsigned int shift) 176 unsigned long addr, unsigned int shift)
@@ -261,6 +272,11 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
261 ret = -EAGAIN; 272 ret = -EAGAIN;
262 goto out_unlock; 273 goto out_unlock;
263 } 274 }
275 /* Check if we raced and someone else has set the same thing */
276 if (level == 1 && pmd_raw(*pmd) == pte_raw(pte)) {
277 ret = 0;
278 goto out_unlock;
279 }
264 /* Valid 2MB page here already, remove it */ 280 /* Valid 2MB page here already, remove it */
265 old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd), 281 old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
266 ~0UL, 0, lgpa, PMD_SHIFT); 282 ~0UL, 0, lgpa, PMD_SHIFT);
@@ -275,12 +291,13 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
275 } 291 }
276 } else if (level == 1 && !pmd_none(*pmd)) { 292 } else if (level == 1 && !pmd_none(*pmd)) {
277 /* 293 /*
278 * There's a page table page here, but we wanted 294 * There's a page table page here, but we wanted to
279 * to install a large page. Tell the caller and let 295 * install a large page, so remove and free the page
280 * it try installing a normal page if it wants. 296 * table page. new_ptep will be NULL since level == 1.
281 */ 297 */
282 ret = -EBUSY; 298 new_ptep = pte_offset_kernel(pmd, 0);
283 goto out_unlock; 299 pmd_clear(pmd);
300 kvmppc_radix_flush_pwc(kvm, gpa);
284 } 301 }
285 if (level == 0) { 302 if (level == 0) {
286 if (pmd_none(*pmd)) { 303 if (pmd_none(*pmd)) {
@@ -291,6 +308,11 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
291 } 308 }
292 ptep = pte_offset_kernel(pmd, gpa); 309 ptep = pte_offset_kernel(pmd, gpa);
293 if (pte_present(*ptep)) { 310 if (pte_present(*ptep)) {
311 /* Check if someone else set the same thing */
312 if (pte_raw(*ptep) == pte_raw(pte)) {
313 ret = 0;
314 goto out_unlock;
315 }
294 /* PTE was previously valid, so invalidate it */ 316 /* PTE was previously valid, so invalidate it */
295 old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 317 old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT,
296 0, gpa, 0); 318 0, gpa, 0);
@@ -469,16 +491,6 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
469 491
470 /* Allocate space in the tree and write the PTE */ 492 /* Allocate space in the tree and write the PTE */
471 ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq); 493 ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
472 if (ret == -EBUSY) {
473 /*
474 * There's already a PMD where wanted to install a large page;
475 * for now, fall back to installing a small page.
476 */
477 level = 0;
478 pfn |= gfn & ((PMD_SIZE >> PAGE_SHIFT) - 1);
479 pte = pfn_pte(pfn, __pgprot(pgflags));
480 ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
481 }
482 494
483 if (page) { 495 if (page) {
484 if (!ret && (pgflags & _PAGE_WRITE)) 496 if (!ret && (pgflags & _PAGE_WRITE))