author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-03-19 15:34:09 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-03-23 22:47:33 -0400
commit    71087002cf807e25056dba4e4028a9f204dc9ffd (patch)
tree      60b64edb2c79b3183bb8187a3492994a72453b94
parent    8d1cf34e7ad5c7738ce20d20bd7f002f562cb8b5 (diff)
powerpc/mm: Merge various PTE bits and accessors definitions
Now that they are almost identical, we can merge some of the definitions related to the PTE format into common files.

This creates a new pte-common.h, which is included by both 32-bit and 64-bit right after the CPU-specific pte-*.h file. It defines some bits to "default" values if they haven't been defined already, and then provides a generic definition of most of the bit combinations based on these, which is exposed to the rest of the kernel.

I also moved most of the "small" accessors to the PTE bits, and the modification helpers (pte_mk*), to the common pgtable.h. The actual accessors remain in their separate files.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
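The core trick the new header relies on is the preprocessor default-if-undefined idiom. Below is a minimal standalone sketch of that idiom; the bit values and the printf harness are illustrative only, not the kernel's:

    /* Sketch of the pte-common.h idiom: the CPU-specific header defines
     * only the bits its hardware has; the common header defaults the
     * rest to 0 so generic bit combinations stay correct everywhere. */
    #include <stdio.h>

    /* Imagine these came from a CPU-specific pte-*.h: */
    #define _PAGE_PRESENT  0x001
    #define _PAGE_RW       0x002
    /* _PAGE_EXEC deliberately left undefined by this "CPU" */

    /* The common-header idiom: default undefined bits to 0 */
    #ifndef _PAGE_EXEC
    #define _PAGE_EXEC     0
    #endif

    /* Generic combinations now work on every CPU; a missing bit is
     * simply a no-op in the OR below. */
    #define PAGE_KERNEL_X  (_PAGE_PRESENT | _PAGE_RW | _PAGE_EXEC)

    int main(void)
    {
            printf("PAGE_KERNEL_X = 0x%03x\n", PAGE_KERNEL_X); /* 0x003 here */
            return 0;
    }

Because an undefined bit defaults to 0, OR-ing it into a combined mask does nothing on CPUs that lack the feature, which is what lets one generic set of PAGE_* combinations serve every family.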
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc32.h | 204
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 132
-rw-r--r--  arch/powerpc/include/asm/pgtable.h       |  54
-rw-r--r--  arch/powerpc/include/asm/pte-common.h    | 180
4 files changed, 233 insertions(+), 337 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 7ce331e51f90..ba45c997830f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -97,174 +97,11 @@ extern int icache_44x_need_flush;
 #include <asm/pte-hash32.h>
 #endif
 
-/* If _PAGE_SPECIAL is defined, then we advertise our support for it */
-#ifdef _PAGE_SPECIAL
-#define __HAVE_ARCH_PTE_SPECIAL
-#endif
-
-/*
- * Some bits are only used on some cpu families... Make sure that all
- * the undefined gets defined as 0
- */
-#ifndef _PAGE_HASHPTE
-#define _PAGE_HASHPTE	0
-#endif
-#ifndef _PTE_NONE_MASK
-#define _PTE_NONE_MASK	0
-#endif
-#ifndef _PAGE_SHARED
-#define _PAGE_SHARED	0
-#endif
-#ifndef _PAGE_HWWRITE
-#define _PAGE_HWWRITE	0
-#endif
-#ifndef _PAGE_HWEXEC
-#define _PAGE_HWEXEC	0
-#endif
-#ifndef _PAGE_EXEC
-#define _PAGE_EXEC	0
-#endif
-#ifndef _PAGE_ENDIAN
-#define _PAGE_ENDIAN	0
-#endif
-#ifndef _PAGE_COHERENT
-#define _PAGE_COHERENT	0
-#endif
-#ifndef _PAGE_WRITETHRU
-#define _PAGE_WRITETHRU	0
-#endif
-#ifndef _PAGE_SPECIAL
-#define _PAGE_SPECIAL	0
-#endif
-#ifndef _PMD_PRESENT_MASK
-#define _PMD_PRESENT_MASK	_PMD_PRESENT
-#endif
-#ifndef _PMD_SIZE
-#define _PMD_SIZE	0
-#define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
-#endif
-
-#ifndef _PAGE_KERNEL_RO
-#define _PAGE_KERNEL_RO	0
-#endif
-#ifndef _PAGE_KERNEL_RW
-#define _PAGE_KERNEL_RW	(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
-#endif
-
-#define _PAGE_HPTEFLAGS	_PAGE_HASHPTE
-
-/* Location of the PFN in the PTE. Most platforms use the same as _PAGE_SHIFT
- * here (ie, naturally aligned). Platform who don't just pre-define the
- * value so we don't override it here
- */
-#ifndef PTE_RPN_SHIFT
-#define PTE_RPN_SHIFT	(PAGE_SHIFT)
-#endif
-
-#ifdef CONFIG_PTE_64BIT
-#define PTE_RPN_MAX	(1ULL << (64 - PTE_RPN_SHIFT))
-#define PTE_RPN_MASK	(~((1ULL<<PTE_RPN_SHIFT)-1))
-#else
-#define PTE_RPN_MAX	(1UL << (32 - PTE_RPN_SHIFT))
-#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
-#endif
-
-/* _PAGE_CHG_MASK masks of bits that are to be preserved accross
- * pgprot changes
- */
-#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-			 _PAGE_ACCESSED | _PAGE_SPECIAL)
-
-/* Mask of bits returned by pte_pgprot() */
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
-			 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
-			 _PAGE_USER | _PAGE_ACCESSED | \
-			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
-			 _PAGE_EXEC | _PAGE_HWEXEC)
-
-/*
- * We define 2 sets of base prot bits, one for basic pages (ie,
- * cacheable kernel and user pages) and one for non cacheable
- * pages. We always set _PAGE_COHERENT when SMP is enabled or
- * the processor might need it for DMA coherency.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
-#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
-#else
-#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
-#endif
-#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
-
-/* Permission masks used for kernel mappings */
-#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
-#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
-				 _PAGE_NO_CACHE)
-#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
-				 _PAGE_NO_CACHE | _PAGE_GUARDED)
-#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC)
-#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
-#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC)
-
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
-	defined(CONFIG_KPROBES)
-/* We want the debuggers to be able to set breakpoints anywhere, so
- * don't write protect the kernel text */
-#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
-#else
-#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
-#endif
-
-#define PAGE_NONE	__pgprot(_PAGE_BASE)
-#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-
-/*
- * The PowerPC can only do execute protection on a segment (256MB) basis,
- * not on a page basis. So we consider execute permission the same as read.
- * Also, write permissions imply read permissions.
- * This is the closest we can get..
- */
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY_X
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY_X
-#define __P100	PAGE_READONLY
-#define __P101	PAGE_READONLY_X
-#define __P110	PAGE_COPY
-#define __P111	PAGE_COPY_X
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY_X
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED_X
-#define __S100	PAGE_READONLY
-#define __S101	PAGE_READONLY_X
-#define __S110	PAGE_SHARED
-#define __S111	PAGE_SHARED_X
+/* And here we include common definitions */
+#include <asm/pte-common.h>
 
 #ifndef __ASSEMBLY__
-/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
- * kernel without large page PMD support */
-extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 
-/*
- * Conversions between PTE values and page frame numbers.
- */
-
-#define pte_pfn(x)		(pte_val(x) >> PTE_RPN_SHIFT)
-#define pte_page(x)		pfn_to_page(pte_pfn(x))
-
-#define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |\
-					pgprot_val(prot))
-#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
-#endif /* __ASSEMBLY__ */
-
-#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
-#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
 #define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
 
@@ -273,43 +110,6 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 #define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
 #define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)
 
-#ifndef __ASSEMBLY__
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
-static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
-
-static inline pte_t pte_wrprotect(pte_t pte) {
-	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
-static inline pte_t pte_mkclean(pte_t pte) {
-	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
-static inline pte_t pte_mkold(pte_t pte) {
-	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-
-static inline pte_t pte_mkwrite(pte_t pte) {
-	pte_val(pte) |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) {
-	pte_val(pte) |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) {
-	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkspecial(pte_t pte) {
-	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline pgprot_t pte_pgprot(pte_t pte)
-{
-	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
-}
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
-	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
-	return pte;
-}
-
 /*
  * When flushing the tlb entry for a page, we also need to flush the hash
  * table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
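The PTE_RPN_SHIFT/PTE_RPN_MASK definitions removed above (and re-created in pte-common.h below) are plain shift arithmetic: the mask clears every bit below the shift, leaving room for the page frame number above it. A quick self-contained sanity check of that arithmetic, assuming 4K pages (PAGE_SHIFT = 12) and 32-bit PTEs; the harness is illustrative, not from the patch:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT    12
    #define PTE_RPN_SHIFT PAGE_SHIFT

    int main(void)
    {
            /* Same shape as the kernel macros, pinned to 32 bits */
            uint32_t rpn_mask = ~((UINT32_C(1) << PTE_RPN_SHIFT) - 1);
            uint32_t rpn_max  = UINT32_C(1) << (32 - PTE_RPN_SHIFT);

            /* Low 12 bits stay free for flag bits; the PFN can index
             * 2^20 pages, i.e. 4GB of physical space. */
            printf("mask=%#x max pfns=%#x\n", rpn_mask, rpn_max);
            /* prints: mask=0xfffff000 max pfns=0x100000 */
            return 0;
    }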
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 5a575f2905f5..768e0f08f009 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -80,82 +80,8 @@
  * Include the PTE bits definitions
  */
 #include <asm/pte-hash64.h>
+#include <asm/pte-common.h>
 
-/* Some other useful definitions */
-#define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
-#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
-
-/* _PAGE_CHG_MASK masks of bits that are to be preserved accross
- * pgprot changes
- */
-#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-			 _PAGE_ACCESSED | _PAGE_SPECIAL)
-
-#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
-#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
-
-
-/* Permission masks used to generate the __P and __S table,
- *
- * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
- */
-#define PAGE_NONE	__pgprot(_PAGE_BASE)
-#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-
-/* Permission masks used for kernel mappings */
-#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
-#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
-				 _PAGE_NO_CACHE)
-#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
-				 _PAGE_NO_CACHE | _PAGE_GUARDED)
-#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC)
-#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
-#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC)
-
-/* Protection bits for use by pte_pgprot() */
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | \
-			 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
-			 _PAGE_4K_PFN | _PAGE_USER | _PAGE_RW | \
-			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
-
-
-/* We always have _PAGE_SPECIAL on 64 bit */
-#define __HAVE_ARCH_PTE_SPECIAL
-
-/* Make modules code happy. We don't set RO yet */
-#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
-
-/*
- * POWER4 and newer have per page execute protection, older chips can only
- * do this on a segment (256MB) basis.
- *
- * Also, write permissions imply read permissions.
- * This is the closest we can get..
- *
- * Note due to the way vm flags are laid out, the bits are XWR
- */
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
-#define __P100	PAGE_READONLY_X
-#define __P101	PAGE_READONLY_X
-#define __P110	PAGE_COPY_X
-#define __P111	PAGE_COPY_X
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED
-#define __S100	PAGE_READONLY_X
-#define __S101	PAGE_READONLY_X
-#define __S110	PAGE_SHARED_X
-#define __S111	PAGE_SHARED_X
 
 #ifdef CONFIG_PPC_MM_SLICES
 #define HAVE_ARCH_UNMAPPED_AREA
@@ -196,34 +122,8 @@
 #endif /* __real_pte */
 
 
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- *
- * mk_pte takes a (struct page *) as input
- */
-#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
-
-static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
-{
-	pte_t pte;
-
-
-	pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
-	return pte;
-}
-
-#define pte_modify(_pte, newprot) \
-  (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
-
-#define pte_none(pte)		((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
-#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
-
 /* pte_clear moved to later in this file */
 
-#define pte_pfn(x)		((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
-#define pte_page(x)		pfn_to_page(pte_pfn(x))
-
 #define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
 #define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)
 
@@ -271,36 +171,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 /* This now only contains the vmalloc pages */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW;}
-static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY;}
-static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED;}
-static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE;}
-static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
-
-static inline pte_t pte_wrprotect(pte_t pte) {
-	pte_val(pte) &= ~(_PAGE_RW); return pte; }
-static inline pte_t pte_mkclean(pte_t pte) {
-	pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
-static inline pte_t pte_mkold(pte_t pte) {
-	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) {
-	pte_val(pte) |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) {
-	pte_val(pte) |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) {
-	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) {
-	return pte; }
-static inline pte_t pte_mkspecial(pte_t pte) {
-	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline pgprot_t pte_pgprot(pte_t pte)
-{
-	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
-}
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 81574f94ea32..eb17da781128 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -25,12 +25,58 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 # include <asm/pgtable-ppc32.h>
 #endif
 
-/* Special mapping for AGP */
-#define PAGE_AGP		(PAGE_KERNEL_NC)
-#define HAVE_PAGE_AGP
-
 #ifndef __ASSEMBLY__
 
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
+static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
+static inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }
+static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
+	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+		     pgprot_val(pgprot)); }
+static inline unsigned long pte_pfn(pte_t pte)	{
+	return pte_val(pte) >> PTE_RPN_SHIFT; }
+
+/* Keep these as macros to avoid include dependency mess */
+#define pte_page(x)		pfn_to_page(pte_pfn(x))
+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte) {
+	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_mkclean(pte_t pte) {
+	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_mkold(pte_t pte) {
+	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) {
+	pte_val(pte) |= _PAGE_RW; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) {
+	pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) {
+	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) {
+	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) {
+	return pte; }
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+	return pte;
+}
+
+
 /* Insert a PTE, top-level function is out of line. It uses an inline
  * low level function in the respective pgtable-* files
  */
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
new file mode 100644
index 000000000000..d9740e886801
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -0,0 +1,180 @@
+/* Included from asm/pgtable-*.h only! */
+
+/*
+ * Some bits are only used on some cpu families... Make sure that all
+ * the undefined ones get a sensible default
+ */
+#ifndef _PAGE_HASHPTE
+#define _PAGE_HASHPTE	0
+#endif
+#ifndef _PAGE_SHARED
+#define _PAGE_SHARED	0
+#endif
+#ifndef _PAGE_HWWRITE
+#define _PAGE_HWWRITE	0
+#endif
+#ifndef _PAGE_HWEXEC
+#define _PAGE_HWEXEC	0
+#endif
+#ifndef _PAGE_EXEC
+#define _PAGE_EXEC	0
+#endif
+#ifndef _PAGE_ENDIAN
+#define _PAGE_ENDIAN	0
+#endif
+#ifndef _PAGE_COHERENT
+#define _PAGE_COHERENT	0
+#endif
+#ifndef _PAGE_WRITETHRU
+#define _PAGE_WRITETHRU	0
+#endif
+#ifndef _PAGE_SPECIAL
+#define _PAGE_SPECIAL	0
+#endif
+#ifndef _PAGE_4K_PFN
+#define _PAGE_4K_PFN	0
+#endif
+#ifndef _PAGE_PSIZE
+#define _PAGE_PSIZE	0
+#endif
+#ifndef _PMD_PRESENT_MASK
+#define _PMD_PRESENT_MASK	_PMD_PRESENT
+#endif
+#ifndef _PMD_SIZE
+#define _PMD_SIZE	0
+#define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
+#endif
+#ifndef _PAGE_KERNEL_RO
+#define _PAGE_KERNEL_RO	0
+#endif
+#ifndef _PAGE_KERNEL_RW
+#define _PAGE_KERNEL_RW	(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
+#endif
+#ifndef _PAGE_HPTEFLAGS
+#define _PAGE_HPTEFLAGS	_PAGE_HASHPTE
+#endif
+#ifndef _PTE_NONE_MASK
+#define _PTE_NONE_MASK	_PAGE_HPTEFLAGS
+#endif
+
+/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
+ * kernel without large page PMD support
+ */
+#ifndef __ASSEMBLY__
+extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
+#endif /* __ASSEMBLY__ */
+
+/* Location of the PFN in the PTE. Most 32-bit platforms use the same
+ * as _PAGE_SHIFT here (ie, naturally aligned).
+ * Platforms that don't simply pre-define the value, so we don't override it here
+ */
+#ifndef PTE_RPN_SHIFT
+#define PTE_RPN_SHIFT	(PAGE_SHIFT)
+#endif
+
+/* The mask covered by the RPN must be a ULL on 32-bit platforms with
+ * 64-bit PTEs
+ */
+#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#define PTE_RPN_MAX	(1ULL << (64 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK	(~((1ULL<<PTE_RPN_SHIFT)-1))
+#else
+#define PTE_RPN_MAX	(1UL << (32 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
+#endif
+
+/* _PAGE_CHG_MASK masks the bits that are to be preserved across
+ * pgprot changes
+ */
+#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+			 _PAGE_ACCESSED | _PAGE_SPECIAL)
+
+/* Mask of bits returned by pte_pgprot() */
+#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+			 _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \
+			 _PAGE_USER | _PAGE_ACCESSED | \
+			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
+			 _PAGE_EXEC | _PAGE_HWEXEC)
+
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
+#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
+#else
+#define _PAGE_BASE	(_PAGE_BASE_NC)
+#endif
+
+/* Permission masks used to generate the __P and __S table,
+ *
+ * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
+ *
+ * Write permissions imply read permissions for now (we could make write-only
+ * pages on BookE but we don't bother for now). Execute permission control is
+ * possible on platforms that define _PAGE_EXEC.
+ *
+ * Note due to the way vm flags are laid out, the bits are XWR
+ */
+#define PAGE_NONE	__pgprot(_PAGE_BASE)
+#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#define __P000	PAGE_NONE
+#define __P001	PAGE_READONLY
+#define __P010	PAGE_COPY
+#define __P011	PAGE_COPY
+#define __P100	PAGE_READONLY_X
+#define __P101	PAGE_READONLY_X
+#define __P110	PAGE_COPY_X
+#define __P111	PAGE_COPY_X
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY
+#define __S010	PAGE_SHARED
+#define __S011	PAGE_SHARED
+#define __S100	PAGE_READONLY_X
+#define __S101	PAGE_READONLY_X
+#define __S110	PAGE_SHARED_X
+#define __S111	PAGE_SHARED_X
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC)
+
+/* Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
+	defined(CONFIG_KPROBES)
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
+#endif
+
+/* Make module code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
+
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP	(PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
+
+/* Advertise support for _PAGE_SPECIAL */
+#ifdef _PAGE_SPECIAL
+#define __HAVE_ARCH_PTE_SPECIAL
+#endif
+
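To see what the merged generic helpers guarantee, here is a standalone sketch of pte_modify()'s contract: everything under _PAGE_CHG_MASK (in this demo, the PFN plus the dirty and accessed bits) survives a protection change, while the permission bits are replaced. All constants below are made up for the demo and are not the kernel's real bit layout:

    #include <stdio.h>

    #define _PAGE_DIRTY     0x080UL
    #define _PAGE_ACCESSED  0x100UL
    #define _PAGE_RW        0x002UL
    #define PTE_RPN_MASK    (~0xfffUL)  /* PFN lives above a 4K page */
    #define _PAGE_CHG_MASK  (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED)

    /* Same shape as the merged helper: keep CHG bits, swap in the
     * new protection bits. */
    static unsigned long pte_modify(unsigned long pte, unsigned long newprot)
    {
            return (pte & _PAGE_CHG_MASK) | newprot;
    }

    int main(void)
    {
            unsigned long pte = 0x12345000UL | _PAGE_RW | _PAGE_DIRTY;
            unsigned long ro  = pte_modify(pte, 0); /* drop RW, keep PFN+dirty */
            printf("before=%#lx after=%#lx\n", pte, ro);
            /* prints: before=0x12345082 after=0x12345080 */
            return 0;
    }

This is exactly why centralizing pte_modify() in pgtable.h is safe: the helper's behaviour depends only on _PAGE_CHG_MASK, which pte-common.h now builds the same way for both 32-bit and 64-bit.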