author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-03-19 15:34:09 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-03-23 22:47:33 -0400
commit		71087002cf807e25056dba4e4028a9f204dc9ffd (patch)
tree		60b64edb2c79b3183bb8187a3492994a72453b94 /arch/powerpc/include/asm/pgtable-ppc64.h
parent		8d1cf34e7ad5c7738ce20d20bd7f002f562cb8b5 (diff)
powerpc/mm: Merge various PTE bits and accessors definitions
Now that they are almost identical, we can merge some of the definitions related to the PTE format into common files.

This creates a new pte-common.h which is included by both 32 and 64-bit right after the CPU specific pte-*.h file. It defines some bits to "default" values if they haven't been defined already, and then provides a generic definition of most of the bit combinations based on these, which is then exposed to the rest of the kernel.

I also moved most of the "small" accessors to the PTE bits and the modification helpers (pte_mk*) to the common pgtable.h. The actual accessors remain in their separate files.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
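As a rough illustration of the mechanism described above, here is a minimal sketch of the "default if not already defined" pattern that pte-common.h applies after the CPU specific pte-*.h has been included. The fallback values shown are assumptions for illustration, not the actual contents of pte-common.h; only _PAGE_CHG_MASK is taken from the code this patch removes below:

/* At this point the CPU specific pte-*.h has already been included.
 * Any PTE bit it did not define gets a harmless zero default, so the
 * generic bit combinations below compile on every platform.
 */
#ifndef _PAGE_SPECIAL
#define _PAGE_SPECIAL	0	/* assumed fallback: no "special" PTE bit */
#endif
#ifndef _PAGE_PSIZE
#define _PAGE_PSIZE	0	/* assumed fallback: no per-PTE page size bits */
#endif

/* Generic combinations can then be defined once, based on the (possibly
 * defaulted) bits, and exposed to the rest of the kernel, e.g.:
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL)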
Diffstat (limited to 'arch/powerpc/include/asm/pgtable-ppc64.h')
-rw-r--r--	arch/powerpc/include/asm/pgtable-ppc64.h	| 132
1 file changed, 1 insertion(+), 131 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 5a575f2905f5..768e0f08f009 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -80,82 +80,8 @@
  * Include the PTE bits definitions
  */
 #include <asm/pte-hash64.h>
+#include <asm/pte-common.h>
 
-/* Some other useful definitions */
-#define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
-#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
-
-/* _PAGE_CHG_MASK masks of bits that are to be preserved accross
- * pgprot changes
- */
-#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-			 _PAGE_ACCESSED | _PAGE_SPECIAL)
-
-#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
-#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
-
-
-/* Permission masks used to generate the __P and __S table,
- *
- * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
- */
-#define PAGE_NONE	__pgprot(_PAGE_BASE)
-#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-
-/* Permission masks used for kernel mappings */
-#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
-#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
-				 _PAGE_NO_CACHE)
-#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
-				 _PAGE_NO_CACHE | _PAGE_GUARDED)
-#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC)
-#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
-#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC)
-
-/* Protection bits for use by pte_pgprot() */
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | \
-			 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
-			 _PAGE_4K_PFN | _PAGE_USER | _PAGE_RW | \
-			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
-
-
-/* We always have _PAGE_SPECIAL on 64 bit */
-#define __HAVE_ARCH_PTE_SPECIAL
-
-/* Make modules code happy. We don't set RO yet */
-#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
-
-/*
- * POWER4 and newer have per page execute protection, older chips can only
- * do this on a segment (256MB) basis.
- *
- * Also, write permissions imply read permissions.
- * This is the closest we can get..
- *
- * Note due to the way vm flags are laid out, the bits are XWR
- */
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
-#define __P100	PAGE_READONLY_X
-#define __P101	PAGE_READONLY_X
-#define __P110	PAGE_COPY_X
-#define __P111	PAGE_COPY_X
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED
-#define __S100	PAGE_READONLY_X
-#define __S101	PAGE_READONLY_X
-#define __S110	PAGE_SHARED_X
-#define __S111	PAGE_SHARED_X
-
 #ifdef CONFIG_PPC_MM_SLICES
 #define HAVE_ARCH_UNMAPPED_AREA
@@ -196,34 +122,8 @@
 #endif /* __real_pte */
 
 
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- *
- * mk_pte takes a (struct page *) as input
- */
-#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
-
-static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
-{
-	pte_t pte;
-
-
-	pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
-	return pte;
-}
-
-#define pte_modify(_pte, newprot) \
-	(__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
-
-#define pte_none(pte)		((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
-#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
-
 /* pte_clear moved to later in this file */
 
-#define pte_pfn(x)		((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
-#define pte_page(x)		pfn_to_page(pte_pfn(x))
-
 #define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
 #define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)
 
@@ -271,36 +171,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 /* This now only contains the vmalloc pages */
 #define pgd_offset_k(address)	pgd_offset(&init_mm, address)
 
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
-static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
-
-static inline pte_t pte_wrprotect(pte_t pte) {
-	pte_val(pte) &= ~(_PAGE_RW); return pte; }
-static inline pte_t pte_mkclean(pte_t pte) {
-	pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
-static inline pte_t pte_mkold(pte_t pte) {
-	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) {
-	pte_val(pte) |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) {
-	pte_val(pte) |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) {
-	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) {
-	return pte; }
-static inline pte_t pte_mkspecial(pte_t pte) {
-	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline pgprot_t pte_pgprot(pte_t pte)
-{
-	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
-}
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,