Diffstat (limited to 'arch/powerpc/mm/fault.c')
 arch/powerpc/mm/fault.c | 46 +++++++++++++++++-----------------------------
 1 file changed, 17 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 91c7b8636b8..76993941cac 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -253,45 +253,33 @@ good_area:
 #endif /* CONFIG_8xx */
 
 	if (is_exec) {
-#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-		/* protection fault */
+#ifdef CONFIG_PPC_STD_MMU
+		/* Protection fault on exec go straight to failure on
+		 * Hash based MMUs as they either don't support per-page
+		 * execute permission, or if they do, it's handled already
+		 * at the hash level. This test would probably have to
+		 * be removed if we change the way this works to make hash
+		 * processors use the same I/D cache coherency mechanism
+		 * as embedded.
+		 */
 		if (error_code & DSISR_PROTFAULT)
 			goto bad_area;
+#endif /* CONFIG_PPC_STD_MMU */
+
 		/*
 		 * Allow execution from readable areas if the MMU does not
 		 * provide separate controls over reading and executing.
+		 *
+		 * Note: That code used to not be enabled for 4xx/BookE.
+		 * It is now as I/D cache coherency for these is done at
+		 * set_pte_at() time and I see no reason why the test
+		 * below wouldn't be valid on those processors. This -may-
+		 * break programs compiled with a really old ABI though.
 		 */
 		if (!(vma->vm_flags & VM_EXEC) &&
 		    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
 		     !(vma->vm_flags & (VM_READ | VM_WRITE))))
 			goto bad_area;
-#else
-		pte_t *ptep;
-		pmd_t *pmdp;
-
-		/* Since 4xx/Book-E supports per-page execute permission,
-		 * we lazily flush dcache to icache. */
-		ptep = NULL;
-		if (get_pteptr(mm, address, &ptep, &pmdp)) {
-			spinlock_t *ptl = pte_lockptr(mm, pmdp);
-			spin_lock(ptl);
-			if (pte_present(*ptep)) {
-				struct page *page = pte_page(*ptep);
-
-				if (!test_bit(PG_arch_1, &page->flags)) {
-					flush_dcache_icache_page(page);
-					set_bit(PG_arch_1, &page->flags);
-				}
-				pte_update(ptep, 0, _PAGE_HWEXEC |
-					   _PAGE_ACCESSED);
-				local_flush_tlb_page(vma, address);
-				pte_unmap_unlock(ptep, ptl);
-				up_read(&mm->mmap_sem);
-				return 0;
-			}
-			pte_unmap_unlock(ptep, ptl);
-		}
-#endif
 	/* a write */
 	} else if (is_write) {
 		if (!(vma->vm_flags & VM_WRITE))