author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-08-18 15:00:34 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-08-26 23:12:51 -0400
commit    ea3cc330ac0cd521ff07c7cd432a1848c19a7e92 (patch)
tree      82f3e84e28ebf5ae33d05ec0142c22b30a21c60a /arch/powerpc/include/asm/pte-common.h
parent    f480fe3916de2e2cbb6e384cb685f0f1d8272188 (diff)
powerpc/mm: Cleanup handling of execute permission
This is an attempt at cleaning up a bit the way we handle execute
permission on powerpc. _PAGE_HWEXEC is gone, _PAGE_EXEC is now only
defined by CPUs that can do something with it, and the myriad of
#ifdefs in the I$/D$ coherency code is reduced to 2 cases that
hopefully should cover everything.

The logic on BookE is a little bit different from what it was, though
not by much. Since _PAGE_EXEC will now be set by the generic code for
executable pages, we need to filter it out if the page is not cache
clean and recover it later. However, I don't expect the code in that
area to be more bloated than it already was due to that change.

I could boast that this brings proper enforcement of per-page execute
permissions to all BookE and 40x, but in fact we've had that for some
time as a side effect of my previous rework in that area (and I didn't
even know it :-) We would only enable execute permission if the page
was cache clean, and we would only cache clean it if we took an exec
fault. Since we now enforce that the latter only works if VM_EXEC is
part of the VMA flags, we de facto already enforce per-page execute
permissions... unless I missed something.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
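For illustration only, here is a minimal, self-contained sketch (not kernel code) of the fallback-define pattern this cleanup relies on: a platform header defines only the PTE bits its MMU actually has, and the common header fills in zero defaults so the composed kernel-page protections still compile everywhere. The bit values and the standalone main() are made up purely to show how _PAGE_KERNEL_RWX degrades to _PAGE_KERNEL_RW on a CPU that does not implement _PAGE_EXEC.

#include <stdio.h>

/* Hypothetical "platform header": this CPU has RW and DIRTY bits but no
 * execute-permission bit, so it simply does not define _PAGE_EXEC. */
#define _PAGE_RW	0x001
#define _PAGE_DIRTY	0x002

/* "Common header": zero defaults for anything the platform left out,
 * mirroring the #ifndef blocks in pte-common.h. */
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE	0
#endif

/* Composite protections: on this CPU the X variant collapses to the
 * plain RW one because _PAGE_EXEC is 0. */
#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)

int main(void)
{
	printf("_PAGE_KERNEL_RW  = 0x%03x\n", _PAGE_KERNEL_RW);
	printf("_PAGE_KERNEL_RWX = 0x%03x\n", _PAGE_KERNEL_RWX);
	return 0;
}

With these defaults in place, the patch below can build PAGE_KERNEL_X and PAGE_KERNEL_ROX from single composite macros instead of sprinkling _PAGE_EXEC | _PAGE_HWEXEC at every use site.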
Diffstat (limited to 'arch/powerpc/include/asm/pte-common.h')
-rw-r--r--  arch/powerpc/include/asm/pte-common.h  |  22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index 8bb6464ba619..c3b65076a263 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -13,9 +13,6 @@
 #ifndef _PAGE_HWWRITE
 #define _PAGE_HWWRITE 0
 #endif
-#ifndef _PAGE_HWEXEC
-#define _PAGE_HWEXEC 0
-#endif
 #ifndef _PAGE_EXEC
 #define _PAGE_EXEC 0
 #endif
@@ -48,10 +45,16 @@
 #define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
 #endif
 #ifndef _PAGE_KERNEL_RO
 #define _PAGE_KERNEL_RO 0
+#endif
+#ifndef _PAGE_KERNEL_ROX
+#define _PAGE_KERNEL_ROX (_PAGE_EXEC)
 #endif
 #ifndef _PAGE_KERNEL_RW
 #define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
+#endif
+#ifndef _PAGE_KERNEL_RWX
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)
 #endif
 #ifndef _PAGE_HPTEFLAGS
 #define _PAGE_HPTEFLAGS _PAGE_HASHPTE
@@ -96,8 +99,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 #define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
                         _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \
                         _PAGE_USER | _PAGE_ACCESSED | \
-                        _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
-                        _PAGE_EXEC | _PAGE_HWEXEC)
+                        _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC)
 
 /*
  * We define 2 sets of base prot bits, one for basic pages (ie,
@@ -154,11 +156,9 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
                                  _PAGE_NO_CACHE)
 #define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
                                  _PAGE_NO_CACHE | _PAGE_GUARDED)
-#define PAGE_KERNEL_X   __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC | \
-                                 _PAGE_HWEXEC)
+#define PAGE_KERNEL_X   __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
 #define PAGE_KERNEL_RO  __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
-#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC | \
-                                 _PAGE_HWEXEC)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
 
 /* Protection used for kernel text. We want the debuggers to be able to
  * set breakpoints anywhere, so don't write protect the kernel text
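The BookE/40x side of the change described in the commit message (stripping the generic _PAGE_EXEC until a page is I-cache clean and recovering it when an exec fault cleans the page) lives outside this header. The following is a loose, standalone illustration of that idea only; the function name, the icache_clean flag and the bit value are hypothetical stand-ins, not the actual powerpc fault or TLB-miss code.

#include <stdio.h>

#define _PAGE_EXEC	0x200	/* made-up value for an exec-permission PTE bit */

/* Sketch: hide the execute permission that generic code put in the PTE
 * until the page's instruction cache is coherent with its data cache.
 * Taking an exec fault on such a page triggers the cache clean, after
 * which the bit can be presented to the hardware. */
static unsigned long filter_exec(unsigned long pte, int icache_clean)
{
	return icache_clean ? pte : (pte & ~(unsigned long)_PAGE_EXEC);
}

int main(void)
{
	unsigned long pte = 0x207;	/* some PTE value with _PAGE_EXEC set */

	printf("unclean page: %#lx\n", filter_exec(pte, 0));	/* exec bit stripped */
	printf("clean page:   %#lx\n", filter_exec(pte, 1));	/* exec bit kept */
	return 0;
}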