author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>    2017-01-30 11:12:59 -0500
committer  Michael Ellerman <mpe@ellerman.id.au>                 2017-02-15 04:02:39 -0500
commit     18061c17c8ecdbdbf1e7d1695ec44e7388b4f601 (patch)
tree       e5d6e802db09f7302eb101ce0a12970d0861fa51
parent     c21a493a2b44650707d06741601894329486f2ad (diff)
powerpc/mm: Update PROTFAULT handling in the page fault path
With radix, we can get a page fault with the DSISR_PROTFAULT value set in case of a PROT_NONE or autonuma mapping. The PROT_NONE case is handled by the vma check, where we consider the access bad. For autonuma we should fall through and fix up the access mask correctly.

Without this patch we trigger the WARN_ON() on radix. This change moves that WARN_ON() within a radix_enabled() check. I also moved the WARN_ON() outside the if condition, making it apply to all types of faults (exec/write/read). It is also conditionalized for book3s, because BOOK3E can also get a PROTFAULT to handle the D/I cache sync.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
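For orientation, the following is a condensed sketch (not the patch itself) of the access-check flow the message describes. The identifiers follow arch/powerpc/mm/fault.c, but the control flow is simplified and the surrounding function is omitted:

/*
 * Condensed sketch of do_page_fault()'s access classification after this
 * patch; simplified for illustration, not the actual kernel code.
 */
if (is_exec) {
	/* exec permission checks ... */
} else if (is_write) {
	if (!(vma->vm_flags & VM_WRITE))
		goto bad_area;
} else {
	/* PROT_NONE mappings have none of these flags: the access is bad */
	if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
		goto bad_area;
}
#ifdef CONFIG_PPC_STD_MMU
/*
 * Hash sees DSISR_NOHPTE rather than DSISR_PROTFAULT when access was
 * reduced, so an unexpected PROTFAULT is worth a warning there.  Radix
 * legitimately raises PROTFAULT for autonuma ptes, so it must fall
 * through to handle_mm_fault() instead of warning.
 */
if (!radix_enabled() && !is_write)
	WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
#endif
/* fall through: handle_mm_fault() fixes up autonuma accesses */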
-rw-r--r--  arch/powerpc/mm/copro_fault.c | 10
-rw-r--r--  arch/powerpc/mm/fault.c       | 43
2 files changed, 39 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index aaa7ec6788b9..697b70ad1195 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -67,11 +67,13 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto out_unlock;
 		/*
-		 * protfault should only happen due to us
-		 * mapping a region readonly temporarily. PROT_NONE
-		 * is also covered by the VMA check above.
+		 * PROT_NONE is covered by the VMA check above.
+		 * and hash should get a NOHPTE fault instead of
+		 * a PROTFAULT in case fixup is needed for things
+		 * like autonuma.
 		 */
-		WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
+		if (!radix_enabled())
+			WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
 	}
 
 	ret = 0;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 6fd30ac7d14a..c636137666c1 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -418,15 +418,6 @@ good_area:
 		    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
 		     !(vma->vm_flags & (VM_READ | VM_WRITE))))
 			goto bad_area;
-
-#ifdef CONFIG_PPC_STD_MMU
-		/*
-		 * protfault should only happen due to us
-		 * mapping a region readonly temporarily. PROT_NONE
-		 * is also covered by the VMA check above.
-		 */
-		WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
-#endif /* CONFIG_PPC_STD_MMU */
 	/* a write */
 	} else if (is_write) {
 		if (!(vma->vm_flags & VM_WRITE))
@@ -436,8 +427,40 @@ good_area:
 	} else {
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
 			goto bad_area;
-		WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
 	}
+#ifdef CONFIG_PPC_STD_MMU
+	/*
+	 * For hash translation mode, we should never get a
+	 * PROTFAULT. Any update to pte to reduce access will result in us
+	 * removing the hash page table entry, thus resulting in a DSISR_NOHPTE
+	 * fault instead of DSISR_PROTFAULT.
+	 *
+	 * A pte update to relax the access will not result in a hash page table
+	 * entry invalidate and hence can result in DSISR_PROTFAULT.
+	 * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
+	 * the special !is_write in the below conditional.
+	 *
+	 * For platforms that doesn't supports coherent icache and do support
+	 * per page noexec bit, we do setup things such that we do the
+	 * sync between D/I cache via fault. But that is handled via low level
+	 * hash fault code (hash_page_do_lazy_icache()) and we should not reach
+	 * here in such case.
+	 *
+	 * For wrong access that can result in PROTFAULT, the above vma->vm_flags
+	 * check should handle those and hence we should fall to the bad_area
+	 * handling correctly.
+	 *
+	 * For embedded with per page exec support that doesn't support coherent
+	 * icache we do get PROTFAULT and we handle that D/I cache sync in
+	 * set_pte_at while taking the noexec/prot fault. Hence this is WARN_ON
+	 * is conditional for server MMU.
+	 *
+	 * For radix, we can get prot fault for autonuma case, because radix
+	 * page table will have them marked noaccess for user.
+	 */
+	if (!radix_enabled() && !is_write)
+		WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
+#endif /* CONFIG_PPC_STD_MMU */
 
 	/*
 	 * If for any reason at all we couldn't handle the fault,