about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMel Gorman <mgorman@suse.de>2015-02-12 17:58:25 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-12 21:54:08 -0500
commit842915f56667f9eebd85932f08c79565148c26d6 (patch)
tree4d8f962c206f28965c8d94da40d6237b94ce8f66
parent8a0516ed8b90c95ffa1363b420caa37418149f21 (diff)
ppc64: add paranoid warnings for unexpected DSISR_PROTFAULT
ppc64 should not be depending on DSISR_PROTFAULT and it's unexpected if they are triggered. This patch adds warnings just in case they are being accidentally depended upon.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Jones <davej@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill Shutemov <kirill.shutemov@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--arch/powerpc/mm/copro_fault.c8
-rw-r--r--arch/powerpc/mm/fault.c20
2 files changed, 15 insertions, 13 deletions
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 1b5305d4bdab..f031a47d7701 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -64,10 +64,14 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
64 if (!(vma->vm_flags & VM_WRITE)) 64 if (!(vma->vm_flags & VM_WRITE))
65 goto out_unlock; 65 goto out_unlock;
66 } else { 66 } else {
67 if (dsisr & DSISR_PROTFAULT)
68 goto out_unlock;
69 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) 67 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
70 goto out_unlock; 68 goto out_unlock;
69 /*
70 * protfault should only happen due to us
71 * mapping a region readonly temporarily. PROT_NONE
72 * is also covered by the VMA check above.
73 */
74 WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
71 } 75 }
72 76
73 ret = 0; 77 ret = 0;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index f38327b95f76..b396868d2aa7 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -389,17 +389,6 @@ good_area:
389#endif /* CONFIG_8xx */ 389#endif /* CONFIG_8xx */
390 390
391 if (is_exec) { 391 if (is_exec) {
392#ifdef CONFIG_PPC_STD_MMU
393 /* Protection fault on exec go straight to failure on
394 * Hash based MMUs as they either don't support per-page
395 * execute permission, or if they do, it's handled already
396 * at the hash level. This test would probably have to
397 * be removed if we change the way this works to make hash
398 * processors use the same I/D cache coherency mechanism
399 * as embedded.
400 */
401#endif /* CONFIG_PPC_STD_MMU */
402
403 /* 392 /*
404 * Allow execution from readable areas if the MMU does not 393 * Allow execution from readable areas if the MMU does not
405 * provide separate controls over reading and executing. 394 * provide separate controls over reading and executing.
@@ -414,6 +403,14 @@ good_area:
414 (cpu_has_feature(CPU_FTR_NOEXECUTE) || 403 (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
415 !(vma->vm_flags & (VM_READ | VM_WRITE)))) 404 !(vma->vm_flags & (VM_READ | VM_WRITE))))
416 goto bad_area; 405 goto bad_area;
406#ifdef CONFIG_PPC_STD_MMU
407 /*
408 * protfault should only happen due to us
409 * mapping a region readonly temporarily. PROT_NONE
410 * is also covered by the VMA check above.
411 */
412 WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
413#endif /* CONFIG_PPC_STD_MMU */
417 /* a write */ 414 /* a write */
418 } else if (is_write) { 415 } else if (is_write) {
419 if (!(vma->vm_flags & VM_WRITE)) 416 if (!(vma->vm_flags & VM_WRITE))
@@ -423,6 +420,7 @@ good_area:
423 } else { 420 } else {
424 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) 421 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
425 goto bad_area; 422 goto bad_area;
423 WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
426 } 424 }
427 425
428 /* 426 /*